author		Ingo Molnar <mingo@elte.hu>	2011-12-15 02:21:21 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-12-15 02:21:30 -0500
commit		6a54aebf6978e9f296a4d3da3e40af425163c22e (patch)
tree		8217c7114db02d8b69c22fc44880749426949bc3 /fs
parent		067491b7313c41f49607fce782d29344d1472587 (diff)
parent		dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 (diff)
Merge commit 'v3.2-rc5' into sched/core
Merge reason: Pick up the latest fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs')
91 files changed, 2076 insertions, 1205 deletions
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -337,7 +337,7 @@ static void bio_fs_destructor(struct bio *bio)
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
  */
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
 	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 
@@ -365,7 +365,7 @@ static void bio_kmalloc_destructor(struct bio *bio)
  * %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
 	struct bio *bio;
 
@@ -696,7 +696,8 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 	kfree(bmd);
 }
 
-static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+static struct bio_map_data *bio_alloc_map_data(int nr_segs,
+					       unsigned int iov_count,
 					       gfp_t gfp_mask)
 {
 	struct bio_map_data *bmd;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8855aad3929c..22c64fff1bd5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -683,7 +683,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
 		return PTR_ERR(fspath);
 
 	if (fspath > fspath_min) {
-		ipath->fspath->val[i] = (u64)fspath;
+		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
 		++ipath->fspath->elem_cnt;
 		ipath->fspath->bytes_left = fspath - fspath_min;
 	} else {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0fe615e4ea38..dede441bdeee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -514,10 +514,25 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct extent_buffer *buf)
 {
+	/* ensure we can see the force_cow */
+	smp_rmb();
+
+	/*
+	 * We do not need to cow a block if
+	 * 1) this block is not created or changed in this transaction;
+	 * 2) this block does not belong to the TREE_RELOC tree;
+	 * 3) the root is not forced COW.
+	 *
+	 * What is forced COW:
+	 *    when we create a snapshot during committing the transaction,
+	 *    after we've finished copying the src root, we must COW the
+	 *    shared block to ensure metadata consistency.
+	 */
 	if (btrfs_header_generation(buf) == trans->transid &&
 	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
 	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
-	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
+	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
+	    !root->force_cow)
 		return 0;
 	return 1;
 }
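The force_cow flag read here is published by the transaction code (see the fs/btrfs/transaction.c hunks below): the writer sets root->force_cow and issues smp_wmb(), the reader issues smp_rmb() before testing it. A minimal userspace sketch of the same publish/observe pairing, using C11 acquire/release atomics in place of the kernel barriers (names are illustrative, not btrfs API):

	/* Illustrative userspace analogue of the force_cow pairing; the
	 * kernel uses a plain int plus smp_wmb()/smp_rmb() instead of
	 * C11 atomics. */
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool force_cow;

	/* transaction side: publish (root->force_cow = 1; smp_wmb();) */
	static void snapshot_pending(void)
	{
		atomic_store_explicit(&force_cow, true, memory_order_release);
	}

	/* allocator side: observe (smp_rmb(); ... !root->force_cow) */
	static bool may_skip_cow(void)
	{
		return !atomic_load_explicit(&force_cow, memory_order_acquire);
	}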
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b9ba59ff9292..50634abef9b4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -848,7 +848,8 @@ struct btrfs_free_cluster {
 enum btrfs_caching_type {
 	BTRFS_CACHE_NO		= 0,
 	BTRFS_CACHE_STARTED	= 1,
-	BTRFS_CACHE_FINISHED	= 2,
+	BTRFS_CACHE_FAST	= 2,
+	BTRFS_CACHE_FINISHED	= 3,
 };
 
 enum btrfs_disk_cache_state {
@@ -1271,6 +1272,8 @@ struct btrfs_root {
 	 * for stat. It may be used for more later
 	 */
 	dev_t anon_dev;
+
+	int force_cow;
 };
 
 struct btrfs_ioctl_defrag_range_args {
@@ -2366,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 int btrfs_block_rsv_refill(struct btrfs_root *root,
 			   struct btrfs_block_rsv *block_rsv,
 			   u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+				   struct btrfs_block_rsv *block_rsv,
+				   u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62afe5c5694e..632f8f3cc9db 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -620,7 +620,7 @@ out:
 
 static int btree_io_failed_hook(struct bio *failed_bio,
 				struct page *page, u64 start, u64 end,
-				u64 mirror_num, struct extent_state *state)
+				int mirror_num, struct extent_state *state)
 {
 	struct extent_io_tree *tree;
 	unsigned long len;
@@ -2573,22 +2573,10 @@ static int write_dev_supers(struct btrfs_device *device,
 	int errors = 0;
 	u32 crc;
 	u64 bytenr;
-	int last_barrier = 0;
 
 	if (max_mirrors == 0)
 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
 
-	/* make sure only the last submit_bh does a barrier */
-	if (do_barriers) {
-		for (i = 0; i < max_mirrors; i++) {
-			bytenr = btrfs_sb_offset(i);
-			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
-			    device->total_bytes)
-				break;
-			last_barrier = i;
-		}
-	}
-
 	for (i = 0; i < max_mirrors; i++) {
 		bytenr = btrfs_sb_offset(i);
 		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
@@ -2634,17 +2622,136 @@ static int write_dev_supers(struct btrfs_device *device,
 			bh->b_end_io = btrfs_end_buffer_write_sync;
 		}
 
-		if (i == last_barrier && do_barriers)
-			ret = submit_bh(WRITE_FLUSH_FUA, bh);
-		else
-			ret = submit_bh(WRITE_SYNC, bh);
-
+		/*
+		 * we fua the first super.  The others we allow
+		 * to go down lazy.
+		 */
+		ret = submit_bh(WRITE_FUA, bh);
 		if (ret)
 			errors++;
 	}
 	return errors < i ? 0 : -1;
 }
 
+/*
+ * endio for the write_dev_flush, this will wake anyone waiting
+ * for the barrier when it is done
+ */
+static void btrfs_end_empty_barrier(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	bio_put(bio);
+}
+
+/*
+ * trigger flushes for one of the devices.  If you pass wait == 0, the
+ * flushes are sent down.  With wait == 1, it waits for the previous flush.
+ *
+ * any device where the flush fails with eopnotsupp is flagged as
+ * not-barrier capable
+ */
+static int write_dev_flush(struct btrfs_device *device, int wait)
+{
+	struct bio *bio;
+	int ret = 0;
+
+	if (device->nobarriers)
+		return 0;
+
+	if (wait) {
+		bio = device->flush_bio;
+		if (!bio)
+			return 0;
+
+		wait_for_completion(&device->flush_wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+			printk("btrfs: disabling barriers on dev %s\n",
+			       device->name);
+			device->nobarriers = 1;
+		}
+		if (!bio_flagged(bio, BIO_UPTODATE)) {
+			ret = -EIO;
+		}
+
+		/* drop the reference from the wait == 0 run */
+		bio_put(bio);
+		device->flush_bio = NULL;
+
+		return ret;
+	}
+
+	/*
+	 * one reference for us, and we leave it for the
+	 * caller
+	 */
+	device->flush_bio = NULL;
+	bio = bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_end_io = btrfs_end_empty_barrier;
+	bio->bi_bdev = device->bdev;
+	init_completion(&device->flush_wait);
+	bio->bi_private = &device->flush_wait;
+	device->flush_bio = bio;
+
+	bio_get(bio);
+	submit_bio(WRITE_FLUSH, bio);
+
+	return 0;
+}
+
+/*
+ * send an empty flush down to each device in parallel,
+ * then wait for them
+ */
+static int barrier_all_devices(struct btrfs_fs_info *info)
+{
+	struct list_head *head;
+	struct btrfs_device *dev;
+	int errors = 0;
+	int ret;
+
+	/* send down all the barriers */
+	head = &info->fs_devices->devices;
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 0);
+		if (ret)
+			errors++;
+	}
+
+	/* wait for all the barriers */
+	list_for_each_entry_rcu(dev, head, dev_list) {
+		if (!dev->bdev) {
+			errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_flush(dev, 1);
+		if (ret)
+			errors++;
+	}
+	if (errors)
+		return -EIO;
+	return 0;
+}
+
 int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
 	struct list_head *head;
@@ -2666,6 +2773,10 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
 	head = &root->fs_info->fs_devices->devices;
+
+	if (do_barriers)
+		barrier_all_devices(root->fs_info);
+
 	list_for_each_entry_rcu(dev, head, dev_list) {
 		if (!dev->bdev) {
 			total_errors++;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b232150b5b6b..2ad813674d77 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			     struct btrfs_root *root,
 			     int load_cache_only)
 {
+	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_caching_control *caching_ctl;
 	int ret = 0;
 
-	smp_mb();
-	if (cache->cached != BTRFS_CACHE_NO)
+	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+	BUG_ON(!caching_ctl);
+
+	INIT_LIST_HEAD(&caching_ctl->list);
+	mutex_init(&caching_ctl->mutex);
+	init_waitqueue_head(&caching_ctl->wait);
+	caching_ctl->block_group = cache;
+	caching_ctl->progress = cache->key.objectid;
+	atomic_set(&caching_ctl->count, 1);
+	caching_ctl->work.func = caching_thread;
+
+	spin_lock(&cache->lock);
+	/*
+	 * This should be a rare occasion, but this could happen I think in the
+	 * case where one thread starts to load the space cache info, and then
+	 * some other thread starts a transaction commit which tries to do an
+	 * allocation while the other thread is still loading the space cache
+	 * info.  The previous loop should have kept us from choosing this
+	 * block group, but if we've moved to the state where we will wait on
+	 * caching block groups we need to first check if we're doing a fast
+	 * load here, so we can wait for it to finish, otherwise we could end
+	 * up allocating from a block group whose cache gets evicted for one
+	 * reason or another.
+	 */
+	while (cache->cached == BTRFS_CACHE_FAST) {
+		struct btrfs_caching_control *ctl;
+
+		ctl = cache->caching_ctl;
+		atomic_inc(&ctl->count);
+		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&cache->lock);
+
+		schedule();
+
+		finish_wait(&ctl->wait, &wait);
+		put_caching_control(ctl);
+		spin_lock(&cache->lock);
+	}
+
+	if (cache->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&cache->lock);
+		kfree(caching_ctl);
 		return 0;
+	}
+	WARN_ON(cache->caching_ctl);
+	cache->caching_ctl = caching_ctl;
+	cache->cached = BTRFS_CACHE_FAST;
+	spin_unlock(&cache->lock);
 
 	/*
 	 * We can't do the read from on-disk cache during a commit since we need
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	if (trans && (!trans->transaction->in_commit) &&
 	    (root && root != root->fs_info->tree_root) &&
 	    btrfs_test_opt(root, SPACE_CACHE)) {
-		spin_lock(&cache->lock);
-		if (cache->cached != BTRFS_CACHE_NO) {
-			spin_unlock(&cache->lock);
-			return 0;
-		}
-		cache->cached = BTRFS_CACHE_STARTED;
-		spin_unlock(&cache->lock);
-
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
 		if (ret == 1) {
+			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
 		} else {
-			cache->cached = BTRFS_CACHE_NO;
+			if (load_cache_only) {
+				cache->caching_ctl = NULL;
+				cache->cached = BTRFS_CACHE_NO;
+			} else {
+				cache->cached = BTRFS_CACHE_STARTED;
+			}
 		}
 		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
+			put_caching_control(caching_ctl);
 			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
 		}
+	} else {
+		/*
+		 * We are not going to do the fast caching, set cached to the
+		 * appropriate value and wakeup any waiters.
+		 */
+		spin_lock(&cache->lock);
+		if (load_cache_only) {
+			cache->caching_ctl = NULL;
+			cache->cached = BTRFS_CACHE_NO;
+		} else {
+			cache->cached = BTRFS_CACHE_STARTED;
+		}
+		spin_unlock(&cache->lock);
+		wake_up(&caching_ctl->wait);
 	}
 
-	if (load_cache_only)
-		return 0;
-
-	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-	BUG_ON(!caching_ctl);
-
-	INIT_LIST_HEAD(&caching_ctl->list);
-	mutex_init(&caching_ctl->mutex);
-	init_waitqueue_head(&caching_ctl->wait);
-	caching_ctl->block_group = cache;
-	caching_ctl->progress = cache->key.objectid;
-	/* one for caching kthread, one for caching block group list */
-	atomic_set(&caching_ctl->count, 2);
-	caching_ctl->work.func = caching_thread;
-
-	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_NO) {
-		spin_unlock(&cache->lock);
-		kfree(caching_ctl);
+	if (load_cache_only) {
+		put_caching_control(caching_ctl);
 		return 0;
 	}
-	cache->caching_ctl = caching_ctl;
-	cache->cached = BTRFS_CACHE_STARTED;
-	spin_unlock(&cache->lock);
 
 	down_write(&fs_info->extent_commit_sem);
+	atomic_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 	up_write(&fs_info->extent_commit_sem);
 
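The new BTRFS_CACHE_FAST wait loop above is the classic prepare_to_wait()/schedule()/finish_wait() idiom: drop the spinlock before sleeping, then retake it and re-test the condition after every wakeup. A condition-variable sketch of the same shape (userspace stand-in, not the kernel code):

	/* Userspace analogue of waiting for a BTRFS_CACHE_FAST loader to
	 * finish: sleep on the condition, retake the lock, recheck. */
	#include <pthread.h>

	enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cache_wait = PTHREAD_COND_INITIALIZER;
	static enum cache_state cached = CACHE_NO;

	static void wait_for_fast_load(void)
	{
		pthread_mutex_lock(&lock);
		while (cached == CACHE_FAST)	/* recheck after every wakeup */
			pthread_cond_wait(&cache_wait, &lock);
		pthread_mutex_unlock(&lock);
	}

	static void fast_load_done(enum cache_state result)
	{
		pthread_mutex_lock(&lock);
		cached = result;		/* FINISHED, STARTED or NO */
		pthread_mutex_unlock(&lock);
		pthread_cond_broadcast(&cache_wait); /* wake_up(&ctl->wait) */
	}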
@@ -3847,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
 	return ret;
 }
 
-int btrfs_block_rsv_refill(struct btrfs_root *root,
-			   struct btrfs_block_rsv *block_rsv,
-			   u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+					   struct btrfs_block_rsv *block_rsv,
+					   u64 min_reserved, int flush)
 {
 	u64 num_bytes = 0;
 	int ret = -ENOSPC;
@@ -3868,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 	if (!ret)
 		return 0;
 
-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
 		return 0;
@@ -3877,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 	return ret;
 }
 
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+			   struct btrfs_block_rsv *block_rsv,
+			   u64 min_reserved)
+{
+	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+				   struct btrfs_block_rsv *block_rsv,
+				   u64 min_reserved)
+{
+	return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes)
@@ -5052,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_root *root = orig_root->fs_info->extent_root;
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
+	struct btrfs_block_group_cache *used_block_group;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
 	int done_chunk_alloc = 0;
 	struct btrfs_space_info *space_info;
-	int last_ptr_loop = 0;
 	int loop = 0;
 	int index = 0;
 	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5118,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
+		used_block_group = block_group;
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
@@ -5155,6 +5211,7 @@ search:
 		u64 offset;
 		int cached;
 
+		used_block_group = block_group;
 		btrfs_get_block_group(block_group);
 		search_start = block_group->key.objectid;
 
@@ -5178,13 +5235,15 @@ search:
 		}
 
 have_block_group:
-		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached)) {
 			u64 free_percent;
 
+			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
 						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
-				goto have_block_group;
+				goto alloc;
 
 			free_percent = btrfs_block_group_used(&block_group->item);
 			free_percent *= 100;
@@ -5206,7 +5265,6 @@ have_block_group:
 							orig_root, 0);
 				BUG_ON(ret);
 			}
-			found_uncached_bg = true;
 
 			/*
 			 * If loop is set for cached only, try the next block
@@ -5216,94 +5274,80 @@ have_block_group:
 				goto loop;
 		}
 
-		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached))
-			found_uncached_bg = true;
-
+alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
 		spin_lock(&block_group->free_space_ctl->tree_lock);
 		if (cached &&
 		    block_group->free_space_ctl->free_space <
-		    num_bytes + empty_size) {
+		    num_bytes + empty_cluster + empty_size) {
 			spin_unlock(&block_group->free_space_ctl->tree_lock);
 			goto loop;
 		}
 		spin_unlock(&block_group->free_space_ctl->tree_lock);
 
 		/*
-		 * Ok we want to try and use the cluster allocator, so lets look
-		 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-		 * have tried the cluster allocator plenty of times at this
-		 * point and not have found anything, so we are likely way too
-		 * fragmented for the clustering stuff to find anything, so lets
-		 * just skip it and let the allocator find whatever block it can
-		 * find
+		 * Ok we want to try and use the cluster allocator, so
+		 * lets look there
 		 */
-		if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+		if (last_ptr) {
 			/*
 			 * the refill lock keeps out other
 			 * people trying to start a new cluster
 			 */
 			spin_lock(&last_ptr->refill_lock);
-			if (last_ptr->block_group &&
-			    (last_ptr->block_group->ro ||
-			    !block_group_bits(last_ptr->block_group, data))) {
-				offset = 0;
+			used_block_group = last_ptr->block_group;
+			if (used_block_group != block_group &&
+			    (!used_block_group ||
+			     used_block_group->ro ||
+			     !block_group_bits(used_block_group, data))) {
+				used_block_group = block_group;
 				goto refill_cluster;
 			}
 
-			offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-							  num_bytes, search_start);
+			if (used_block_group != block_group)
+				btrfs_get_block_group(used_block_group);
+
+			offset = btrfs_alloc_from_cluster(used_block_group,
+			  last_ptr, num_bytes, used_block_group->key.objectid);
 			if (offset) {
 				/* we have a block, we're done */
 				spin_unlock(&last_ptr->refill_lock);
 				goto checks;
 			}
 
-			spin_lock(&last_ptr->lock);
-			/*
-			 * whoops, this cluster doesn't actually point to
-			 * this block group.  Get a ref on the block
-			 * group it does point to and try again
-			 */
-			if (!last_ptr_loop && last_ptr->block_group &&
-			    last_ptr->block_group != block_group &&
-			    index <=
-				 get_block_group_index(last_ptr->block_group)) {
-
-				btrfs_put_block_group(block_group);
-				block_group = last_ptr->block_group;
-				btrfs_get_block_group(block_group);
-				spin_unlock(&last_ptr->lock);
-				spin_unlock(&last_ptr->refill_lock);
-
-				last_ptr_loop = 1;
-				search_start = block_group->key.objectid;
-				/*
-				 * we know this block group is properly
-				 * in the list because
-				 * btrfs_remove_block_group, drops the
-				 * cluster before it removes the block
-				 * group from the list
-				 */
-				goto have_block_group;
+			WARN_ON(last_ptr->block_group != used_block_group);
+			if (used_block_group != block_group) {
+				btrfs_put_block_group(used_block_group);
+				used_block_group = block_group;
 			}
-			spin_unlock(&last_ptr->lock);
 refill_cluster:
+			BUG_ON(used_block_group != block_group);
+			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
+			 * set up new clusters, so lets just skip it
+			 * and let the allocator find whatever block
+			 * it can find.  If we reach this point, we
+			 * will have tried the cluster allocator
+			 * plenty of times and not have found
+			 * anything, so we are likely way too
+			 * fragmented for the clustering stuff to find
+			 * anything.  */
+			if (loop >= LOOP_NO_EMPTY_SIZE) {
+				spin_unlock(&last_ptr->refill_lock);
+				goto unclustered_alloc;
+			}
+
 			/*
 			 * this cluster didn't work out, free it and
 			 * start over
 			 */
 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-			last_ptr_loop = 0;
-
 			/* allocate a cluster in this block group */
 			ret = btrfs_find_space_cluster(trans, root,
 					       block_group, last_ptr,
-					       offset, num_bytes,
+					       search_start, num_bytes,
 					       empty_cluster + empty_size);
 			if (ret == 0) {
 				/*
@@ -5339,6 +5383,7 @@ refill_cluster:
 			goto loop;
 		}
 
+unclustered_alloc:
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
 		/*
@@ -5365,14 +5410,14 @@ checks:
 		search_start = stripe_align(root, offset);
 		/* move on to the next group */
 		if (search_start + num_bytes >= search_end) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
-		    block_group->key.objectid + block_group->key.offset) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+		    used_block_group->key.objectid + used_block_group->key.offset) {
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
@@ -5380,14 +5425,14 @@ checks:
 		ins->offset = num_bytes;
 
 		if (offset < search_start)
-			btrfs_add_free_space(block_group, offset,
+			btrfs_add_free_space(used_block_group, offset,
 					     search_start - offset);
 		BUG_ON(offset > search_start);
 
-		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
 						  alloc_type);
 		if (ret == -EAGAIN) {
-			btrfs_add_free_space(block_group, offset, num_bytes);
+			btrfs_add_free_space(used_block_group, offset, num_bytes);
 			goto loop;
 		}
 
@@ -5396,15 +5441,19 @@ checks:
 		ins->offset = num_bytes;
 
 		if (offset < search_start)
-			btrfs_add_free_space(block_group, offset,
+			btrfs_add_free_space(used_block_group, offset,
 					     search_start - offset);
 		BUG_ON(offset > search_start);
+		if (used_block_group != block_group)
+			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 		break;
 loop:
 		failed_cluster_refill = false;
 		failed_alloc = false;
 		BUG_ON(index != get_block_group_index(block_group));
+		if (used_block_group != block_group)
+			btrfs_put_block_group(used_block_group);
 		btrfs_put_block_group(block_group);
 	}
 	up_read(&space_info->groups_sem);
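The used_block_group changes let the allocator carve space out of the block group the cluster actually points at, which may differ from the group the loop is iterating; the extra reference is taken only in that case and dropped on every exit path. A compact sketch of that take-only-if-different refcount discipline (illustrative types, not btrfs's):

	/* Illustrative refcount discipline, mirroring find_free_extent():
	 * take an extra reference only when the cluster's group differs
	 * from the loop's group, drop exactly that reference afterwards. */
	#include <stdatomic.h>
	#include <stddef.h>

	struct block_group {
		atomic_int refs;
	};

	static void bg_get(struct block_group *bg)
	{
		atomic_fetch_add(&bg->refs, 1);
	}

	static void bg_put(struct block_group *bg)
	{
		atomic_fetch_sub(&bg->refs, 1);
	}

	static void alloc_round(struct block_group *bg,
				struct block_group *cluster_bg)
	{
		struct block_group *used = bg;

		if (cluster_bg && cluster_bg != bg) {
			used = cluster_bg;
			bg_get(used);	/* extra ref, as the new code takes */
		}

		/* ... try to allocate from "used" here ... */

		if (used != bg)
			bg_put(used);	/* drop only the ref we took */
	}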
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1f87c4d0e7a0..49f3c9dc09f4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
 	node = tree_search(tree, start);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 		err = insert_state(tree, prealloc, start, end, &bits);
 		prealloc = NULL;
 		BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
 	 */
 	if (state->start < start) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 		err = split_state(tree, state, prealloc, start);
 		BUG_ON(err == -EEXIST);
 		prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
 		this_end = last_start - 1;
 
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 
 		/*
 		 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
 	 */
 	if (state->start <= end && state->end > end) {
 		prealloc = alloc_extent_state_atomic(prealloc);
-		if (!prealloc)
-			return -ENOMEM;
+		if (!prealloc) {
+			err = -ENOMEM;
+			goto out;
+		}
 
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
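These four hunks fix the same bug: the function takes tree->lock near the top, so bailing out with a bare return -ENOMEM skipped the common unlock/cleanup at the out label; routing every failure through that label restores it. A standalone sketch of the single-exit idiom being restored, with a pthread mutex standing in for the tree spinlock (hypothetical names):

	/* Every failure funnels through "out" so the lock taken at the
	 * top is always dropped and the preallocation always freed. */
	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

	int set_bits(int need_alloc)
	{
		int err = 0;
		void *prealloc = NULL;

		pthread_mutex_lock(&tree_lock);

		if (need_alloc) {
			prealloc = malloc(64);
			if (!prealloc) {
				err = -ENOMEM;
				goto out;	/* was: return -ENOMEM (lock leaked!) */
			}
		}
		/* ... insert/split states here ... */

	out:
		pthread_mutex_unlock(&tree_lock);
		free(prealloc);
		return err;
	}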
@@ -2285,16 +2293,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			clean_io_failure(start, page);
 		}
 		if (!uptodate) {
-			u64 failed_mirror;
-			failed_mirror = (u64)bio->bi_bdev;
-			if (tree->ops && tree->ops->readpage_io_failed_hook)
-				ret = tree->ops->readpage_io_failed_hook(
-						bio, page, start, end,
-						failed_mirror, state);
-			else
-				ret = bio_readpage_error(bio, page, start, end,
-							 failed_mirror, NULL);
+			int failed_mirror;
+			failed_mirror = (int)(unsigned long)bio->bi_bdev;
+			/*
+			 * The generic bio_readpage_error handles errors the
+			 * following way: If possible, new read requests are
+			 * created and submitted and will end up in
+			 * end_bio_extent_readpage as well (if we're lucky, not
+			 * in the !uptodate case).  In that case it returns 0
+			 * and we just go on with the next page in our bio.  If
+			 * it can't handle the error it will return -EIO and we
+			 * remain responsible for that page.
+			 */
+			ret = bio_readpage_error(bio, page, start, end,
+						 failed_mirror, NULL);
 			if (ret == 0) {
+error_handled:
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 				uncache_state(&cached);
 				continue;
 			}
+			if (tree->ops && tree->ops->readpage_io_failed_hook) {
+				ret = tree->ops->readpage_io_failed_hook(
+						bio, page, start, end,
+						failed_mirror, state);
+				if (ret == 0)
+					goto error_handled;
+			}
 		}
 
 		if (uptodate) {
@@ -3366,6 +3387,9 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
+	start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+	len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+
 	/*
 	 * lookup the last file extent.  We're not using i_size here
 	 * because there might be preallocation past i_size
@@ -3413,7 +3437,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 			 &cached_state, GFP_NOFS);
 
-	em = get_extent_skip_holes(inode, off, last_for_get_extent,
+	em = get_extent_skip_holes(inode, start, last_for_get_extent,
 				   get_extent);
 	if (!em)
 		goto out;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index feb9be0e23bc..7604c3001322 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -70,7 +70,7 @@ struct extent_io_ops {
 			      unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end, u64 failed_mirror,
+				       u64 start, u64 end, int failed_mirror,
 				       struct extent_state *state);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 181760f9d2ab..ec23d43d0c35 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -351,6 +351,11 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
 		}
 	}
 
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		clear_page_dirty_for_io(io_ctl->pages[i]);
+		set_page_extent_mapped(io_ctl->pages[i]);
+	}
+
 	return 0;
 }
 
@@ -1465,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 {
 	info->offset = offset_to_bitmap(ctl, offset);
 	info->bytes = 0;
+	INIT_LIST_HEAD(&info->list);
 	link_free_space(ctl, info);
 	ctl->total_bitmaps++;
 
@@ -1844,7 +1850,13 @@ again:
 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
-			WARN_ON(1);
+			/* the tree logging code might be calling us before we
+			 * have fully loaded the free space rbtree for this
+			 * block group.  So it is possible the entry won't
+			 * be in the rbtree yet at all.  The caching code
+			 * will make sure not to put it in the rbtree if
+			 * the logging code has pinned it.
+			 */
 			goto out_lock;
 		}
 	}
@@ -2308,6 +2320,7 @@ again:
 
 	if (!found) {
 		start = i;
+		cluster->max_size = 0;
 		found = true;
 	}
 
@@ -2451,16 +2464,23 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
-	struct rb_node *node;
 	int ret = -ENOSPC;
+	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
 	/*
-	 * First check our cached list of bitmaps and see if there is an entry
-	 * here that will work.
+	 * The bitmap that covers offset won't be in the list unless offset
+	 * is just its start offset.
 	 */
+	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+	if (entry->offset != bitmap_offset) {
+		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
+		if (entry && list_empty(&entry->list))
+			list_add(&entry->list, bitmaps);
+	}
+
 	list_for_each_entry(entry, bitmaps, list) {
 		if (entry->bytes < min_bytes)
 			continue;
@@ -2471,38 +2491,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	/*
-	 * If we do have entries on our list and we are here then we didn't find
-	 * anything, so go ahead and get the next entry after the last entry in
-	 * this list and start the search from there.
+	 * The bitmaps list has all the bitmaps that record free space
+	 * starting after offset, so no more search is required.
 	 */
-	if (!list_empty(bitmaps)) {
-		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
-				   list);
-		node = rb_next(&entry->offset_index);
-		if (!node)
-			return -ENOSPC;
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		goto search;
-	}
-
-	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
-	if (!entry)
-		return -ENOSPC;
-
-search:
-	node = &entry->offset_index;
-	do {
-		entry = rb_entry(node, struct btrfs_free_space, offset_index);
-		node = rb_next(&entry->offset_index);
-		if (!entry->bitmap)
-			continue;
-		if (entry->bytes < min_bytes)
-			continue;
-		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-					   bytes, min_bytes);
-	} while (ret && node);
-
-	return ret;
+	return -ENOSPC;
 }
 
 /*
@@ -2520,8 +2512,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct list_head bitmaps;
 	struct btrfs_free_space *entry, *tmp;
+	LIST_HEAD(bitmaps);
 	u64 min_bytes;
 	int ret;
 
@@ -2560,7 +2552,6 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&bitmaps);
 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
 				      bytes, min_bytes);
 	if (ret)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 116ab67a06df..2c984f7d4c2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3490,7 +3490,7 @@ void btrfs_evict_inode(struct inode *inode)
 	 * doing the truncate.
 	 */
 	while (1) {
-		ret = btrfs_block_rsv_refill(root, rsv, min_size);
+		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
 
 		/*
 		 * Try and steal from the global reserve since we will
@@ -6794,11 +6794,13 @@ static int btrfs_getattr(struct vfsmount *mnt,
 			 struct dentry *dentry, struct kstat *stat)
 {
 	struct inode *inode = dentry->d_inode;
+	u32 blocksize = inode->i_sb->s_blocksize;
+
 	generic_fillattr(inode, stat);
 	stat->dev = BTRFS_I(inode)->root->anon_dev;
 	stat->blksize = PAGE_CACHE_SIZE;
-	stat->blocks = (inode_get_bytes(inode) +
-			BTRFS_I(inode)->delalloc_bytes) >> 9;
+	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
+			ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
 	return 0;
 }
 
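The getattr change rounds both byte counts up to the filesystem block size before converting to 512-byte sectors, so st_blocks reflects whole allocated blocks; previously a small file could even report zero blocks. A quick standalone demonstration of the arithmetic (ALIGN redefined locally to match the kernel macro):

	/* With a 4K block size, 100 bytes of data plus 100 bytes of
	 * delalloc used to report (200 >> 9) = 0 sectors; rounding each
	 * component up first reports 16 sectors (two 4K blocks). */
	#include <stdio.h>
	#include <stdint.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t blocksize = 4096, bytes = 100, delalloc = 100;

		uint64_t old_blocks = (bytes + delalloc) >> 9;
		uint64_t new_blocks = (ALIGN(bytes, blocksize) +
				       ALIGN(delalloc, blocksize)) >> 9;

		printf("old: %llu, new: %llu\n",
		       (unsigned long long)old_blocks,
		       (unsigned long long)new_blocks);
		return 0;
	}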
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4a34c472f126..72d461656f60 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1216,12 +1216,12 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 		*devstr = '\0';
 		devstr = vol_args->name;
 		devid = simple_strtoull(devstr, &end, 10);
-		printk(KERN_INFO "resizing devid %llu\n",
+		printk(KERN_INFO "btrfs: resizing devid %llu\n",
 		       (unsigned long long)devid);
 	}
 	device = btrfs_find_device(root, devid, NULL, NULL);
 	if (!device) {
-		printk(KERN_INFO "resizer unable to find device %llu\n",
+		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
 		       (unsigned long long)devid);
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1267,7 +1267,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 	do_div(new_size, root->sectorsize);
 	new_size *= root->sectorsize;
 
-	printk(KERN_INFO "new size for %s is %llu\n",
+	printk(KERN_INFO "btrfs: new size for %s is %llu\n",
 	       device->name, (unsigned long long)new_size);
 
 	if (new_size > old_size) {
@@ -1278,7 +1278,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 		}
 		ret = btrfs_grow_device(trans, device, new_size);
 		btrfs_commit_transaction(trans, root);
-	} else {
+	} else if (new_size < old_size) {
 		ret = btrfs_shrink_device(device, new_size);
 	}
 
@@ -2930,11 +2930,13 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 		goto out;
 
 	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
-		rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val;
+		rel_ptr = ipath->fspath->val[i] -
+			  (u64)(unsigned long)ipath->fspath->val;
 		ipath->fspath->val[i] = rel_ptr;
 	}
 
-	ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size);
+	ret = copy_to_user((void *)(unsigned long)ipa->fspath,
+			   (void *)(unsigned long)ipath->fspath, size);
 	if (ret) {
 		ret = -EFAULT;
 		goto out;
@@ -3017,7 +3019,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 	if (ret < 0)
 		goto out;
 
-	ret = copy_to_user((void *)loi->inodes, (void *)inodes, size);
+	ret = copy_to_user((void *)(unsigned long)loi->inodes,
+			   (void *)(unsigned long)inodes, size);
 	if (ret)
 		ret = -EFAULT;
 
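The (u64)(unsigned long) double casts here and in the backref.c and scrub.c hunks all address the same portability issue: fspath->val[] and these ioctl fields hold kernel pointers in u64 slots, and on 32-bit builds casting a pointer straight to u64 (or back) trips "cast to/from integer of different size" warnings. Going through unsigned long, which is always pointer-sized in the kernel ABI, is the idiomatic fix. A small illustration:

	/* Storing and retrieving pointers in u64 fields portably:
	 * unsigned long matches the pointer width on both 32- and
	 * 64-bit targets, so the round trip is warning-free. */
	#include <stdint.h>

	uint64_t store_ptr(char *p)
	{
		return (uint64_t)(unsigned long)p;   /* pointer -> u64 */
	}

	char *load_ptr(uint64_t v)
	{
		return (char *)(unsigned long)v;     /* u64 -> pointer */
	}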
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f4190f22edfb..c27bcb67f330 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
 	btrfs_release_path(swarn->path);
 
 	ipath = init_ipath(4096, local_root, swarn->path);
+	if (IS_ERR(ipath)) {
+		ret = PTR_ERR(ipath);
+		ipath = NULL;
+		goto err;
+	}
 	ret = paths_from_inode(inum, ipath);
 
 	if (ret < 0)
@@ -272,7 +277,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
 			swarn->logical, swarn->dev->name,
 			(unsigned long long)swarn->sector, root, inum, offset,
 			min(isize - offset, (u64)PAGE_SIZE), nlink,
-			(char *)ipath->fspath->val[i]);
+			(char *)(unsigned long)ipath->fspath->val[i]);
 
 	free_ipath(ipath);
 	return 0;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8bd9d6d0e07a..e28ad4baf483 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -825,13 +825,9 @@ static char *setup_root_args(char *args)
 static struct dentry *mount_subvol(const char *subvol_name, int flags,
 				   const char *device_name, char *data)
 {
-	struct super_block *s;
 	struct dentry *root;
 	struct vfsmount *mnt;
-	struct mnt_namespace *ns_private;
 	char *newargs;
-	struct path path;
-	int error;
 
 	newargs = setup_root_args(data);
 	if (!newargs)
@@ -842,39 +838,17 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
 	if (IS_ERR(mnt))
 		return ERR_CAST(mnt);
 
-	ns_private = create_mnt_ns(mnt);
-	if (IS_ERR(ns_private)) {
-		mntput(mnt);
-		return ERR_CAST(ns_private);
-	}
+	root = mount_subtree(mnt, subvol_name);
 
-	/*
-	 * This will trigger the automount of the subvol so we can just
-	 * drop the mnt we have here and return the dentry that we
-	 * found.
-	 */
-	error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
-				LOOKUP_FOLLOW, &path);
-	put_mnt_ns(ns_private);
-	if (error)
-		return ERR_PTR(error);
-
-	if (!is_subvolume_inode(path.dentry->d_inode)) {
-		path_put(&path);
-		mntput(mnt);
-		error = -EINVAL;
+	if (!IS_ERR(root) && !is_subvolume_inode(root->d_inode)) {
+		struct super_block *s = root->d_sb;
+		dput(root);
+		root = ERR_PTR(-EINVAL);
+		deactivate_locked_super(s);
 		printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
 		       subvol_name);
-		return ERR_PTR(-EINVAL);
 	}
 
-	/* Get a ref to the sb and the dentry we found and return it */
-	s = path.mnt->mnt_sb;
-	atomic_inc(&s->s_active);
-	root = dget(path.dentry);
-	path_put(&path);
-	down_write(&s->s_umount);
-
 	return root;
 }
 
@@ -1083,7 +1057,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1083 | int i = 0, nr_devices; | 1057 | int i = 0, nr_devices; |
1084 | int ret; | 1058 | int ret; |
1085 | 1059 | ||
1086 | nr_devices = fs_info->fs_devices->rw_devices; | 1060 | nr_devices = fs_info->fs_devices->open_devices; |
1087 | BUG_ON(!nr_devices); | 1061 | BUG_ON(!nr_devices); |
1088 | 1062 | ||
1089 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, | 1063 | devices_info = kmalloc(sizeof(*devices_info) * nr_devices, |
@@ -1105,8 +1079,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) | |||
1105 | else | 1079 | else |
1106 | min_stripe_size = BTRFS_STRIPE_LEN; | 1080 | min_stripe_size = BTRFS_STRIPE_LEN; |
1107 | 1081 | ||
1108 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { | 1082 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
1109 | if (!device->in_fs_metadata) | 1083 | if (!device->in_fs_metadata || !device->bdev) |
1110 | continue; | 1084 | continue; |
1111 | 1085 | ||
1112 | avail_space = device->total_bytes - device->bytes_used; | 1086 | avail_space = device->total_bytes - device->bytes_used; |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6a0574e923bc..81376d94cd3c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -785,6 +785,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
785 | 785 | ||
786 | btrfs_save_ino_cache(root, trans); | 786 | btrfs_save_ino_cache(root, trans); |
787 | 787 | ||
788 | /* see comments in should_cow_block() */ | ||
789 | root->force_cow = 0; | ||
790 | smp_wmb(); | ||
791 | |||
788 | if (root->commit_root != root->node) { | 792 | if (root->commit_root != root->node) { |
789 | mutex_lock(&root->fs_commit_mutex); | 793 | mutex_lock(&root->fs_commit_mutex); |
790 | switch_commit_root(root); | 794 | switch_commit_root(root); |
@@ -947,6 +951,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
947 | btrfs_tree_unlock(old); | 951 | btrfs_tree_unlock(old); |
948 | free_extent_buffer(old); | 952 | free_extent_buffer(old); |
949 | 953 | ||
954 | /* see comments in should_cow_block() */ | ||
955 | root->force_cow = 1; | ||
956 | smp_wmb(); | ||
957 | |||
950 | btrfs_set_root_node(new_root_item, tmp); | 958 | btrfs_set_root_node(new_root_item, tmp); |
951 | /* record when the snapshot was created in key.offset */ | 959 | /* record when the snapshot was created in key.offset */ |
952 | key.offset = trans->transid; | 960 | key.offset = trans->transid; |
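The smp_wmb() added on both sides pairs with the smp_rmb() at the top of should_cow_block(): the writer publishes force_cow before its later stores become visible, and the reader orders its flag load ahead of the checks that depend on it. Schematically (a sketch of the barrier pairing only, not the full transaction locking):

	/* writer: create_pending_snapshot() / commit_fs_roots() */
	root->force_cow = 1;		/* or 0 once the root is committed */
	smp_wmb();			/* publish the flag first */

	/* reader: should_cow_block() */
	smp_rmb();			/* see a force_cow value no older than the publish */
	if (root->force_cow)
		return 1;		/* block must be COWed */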
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c37433d3cd82..0a8c8f8304b1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1611,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1611 | if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) | 1611 | if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) |
1612 | return -EINVAL; | 1612 | return -EINVAL; |
1613 | 1613 | ||
1614 | bdev = blkdev_get_by_path(device_path, FMODE_EXCL, | 1614 | bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, |
1615 | root->fs_info->bdev_holder); | 1615 | root->fs_info->bdev_holder); |
1616 | if (IS_ERR(bdev)) | 1616 | if (IS_ERR(bdev)) |
1617 | return PTR_ERR(bdev); | 1617 | return PTR_ERR(bdev); |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index ab5b1c49f352..78f2d4d4f37f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -100,6 +100,12 @@ struct btrfs_device { | |||
100 | struct reada_zone *reada_curr_zone; | 100 | struct reada_zone *reada_curr_zone; |
101 | struct radix_tree_root reada_zones; | 101 | struct radix_tree_root reada_zones; |
102 | struct radix_tree_root reada_extents; | 102 | struct radix_tree_root reada_extents; |
103 | |||
104 | /* for sending down flush barriers */ | ||
105 | struct bio *flush_bio; | ||
106 | struct completion flush_wait; | ||
107 | int nobarriers; | ||
108 | |||
103 | }; | 109 | }; |
104 | 110 | ||
105 | struct btrfs_fs_devices { | 111 | struct btrfs_fs_devices { |
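A sketch of how the new per-device fields could be used to issue a flush barrier; the helper names and the exact bio setup here are illustrative, not taken from this patch:

	static void flush_end_io(struct bio *bio, int err)
	{
		complete(bio->bi_private);	/* wake the thread parked on flush_wait */
	}

	static int send_flush_barrier(struct btrfs_device *dev)
	{
		struct bio *bio = dev->flush_bio;

		if (dev->nobarriers)
			return 0;	/* device is known to ignore flushes */

		bio->bi_end_io = flush_end_io;
		bio->bi_private = &dev->flush_wait;
		bio->bi_bdev = dev->bdev;
		init_completion(&dev->flush_wait);
		submit_bio(WRITE_FLUSH, bio);	/* empty bio carrying a flush request */
		wait_for_completion(&dev->flush_wait);
		return 0;
	}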
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 2abd0dfad7f8..bca3948e9dbf 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -1143,7 +1143,7 @@ static void ceph_d_prune(struct dentry *dentry) | |||
1143 | { | 1143 | { |
1144 | struct ceph_dentry_info *di; | 1144 | struct ceph_dentry_info *di; |
1145 | 1145 | ||
1146 | dout("d_release %p\n", dentry); | 1146 | dout("ceph_d_prune %p\n", dentry); |
1147 | 1147 | ||
1148 | /* do we have a valid parent? */ | 1148 | /* do we have a valid parent? */ |
1149 | if (!dentry->d_parent || IS_ROOT(dentry)) | 1149 | if (!dentry->d_parent || IS_ROOT(dentry)) |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e392bfce84a3..116f36502f17 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -1328,12 +1328,13 @@ int ceph_inode_set_size(struct inode *inode, loff_t size) | |||
1328 | */ | 1328 | */ |
1329 | void ceph_queue_writeback(struct inode *inode) | 1329 | void ceph_queue_writeback(struct inode *inode) |
1330 | { | 1330 | { |
1331 | ihold(inode); | ||
1331 | if (queue_work(ceph_inode_to_client(inode)->wb_wq, | 1332 | if (queue_work(ceph_inode_to_client(inode)->wb_wq, |
1332 | &ceph_inode(inode)->i_wb_work)) { | 1333 | &ceph_inode(inode)->i_wb_work)) { |
1333 | dout("ceph_queue_writeback %p\n", inode); | 1334 | dout("ceph_queue_writeback %p\n", inode); |
1334 | ihold(inode); | ||
1335 | } else { | 1335 | } else { |
1336 | dout("ceph_queue_writeback %p failed\n", inode); | 1336 | dout("ceph_queue_writeback %p failed\n", inode); |
1337 | iput(inode); | ||
1337 | } | 1338 | } |
1338 | } | 1339 | } |
1339 | 1340 | ||
@@ -1353,12 +1354,13 @@ static void ceph_writeback_work(struct work_struct *work) | |||
1353 | */ | 1354 | */ |
1354 | void ceph_queue_invalidate(struct inode *inode) | 1355 | void ceph_queue_invalidate(struct inode *inode) |
1355 | { | 1356 | { |
1357 | ihold(inode); | ||
1356 | if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, | 1358 | if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, |
1357 | &ceph_inode(inode)->i_pg_inv_work)) { | 1359 | &ceph_inode(inode)->i_pg_inv_work)) { |
1358 | dout("ceph_queue_invalidate %p\n", inode); | 1360 | dout("ceph_queue_invalidate %p\n", inode); |
1359 | ihold(inode); | ||
1360 | } else { | 1361 | } else { |
1361 | dout("ceph_queue_invalidate %p failed\n", inode); | 1362 | dout("ceph_queue_invalidate %p failed\n", inode); |
1363 | iput(inode); | ||
1362 | } | 1364 | } |
1363 | } | 1365 | } |
1364 | 1366 | ||
@@ -1434,13 +1436,14 @@ void ceph_queue_vmtruncate(struct inode *inode) | |||
1434 | { | 1436 | { |
1435 | struct ceph_inode_info *ci = ceph_inode(inode); | 1437 | struct ceph_inode_info *ci = ceph_inode(inode); |
1436 | 1438 | ||
1439 | ihold(inode); | ||
1437 | if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, | 1440 | if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, |
1438 | &ci->i_vmtruncate_work)) { | 1441 | &ci->i_vmtruncate_work)) { |
1439 | dout("ceph_queue_vmtruncate %p\n", inode); | 1442 | dout("ceph_queue_vmtruncate %p\n", inode); |
1440 | ihold(inode); | ||
1441 | } else { | 1443 | } else { |
1442 | dout("ceph_queue_vmtruncate %p failed, pending=%d\n", | 1444 | dout("ceph_queue_vmtruncate %p failed, pending=%d\n", |
1443 | inode, ci->i_truncate_pending); | 1445 | inode, ci->i_truncate_pending); |
1446 | iput(inode); | ||
1444 | } | 1447 | } |
1445 | } | 1448 | } |
1446 | 1449 | ||
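All three ceph hunks fix the same race: the inode reference has to be pinned before queue_work(), because taking it only after a successful queue leaves a window in which an already-running work item can iput() the last reference on another CPU. The safe shape is pin first, undo on failure:

	ihold(inode);				/* pin before publishing the work */
	if (queue_work(wq, &ci->i_wb_work)) {
		/* worker now owns the reference and will iput() when done */
	} else {
		iput(inode);			/* already queued; drop the extra pin */
	}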
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a90846fac759..8dc73a594a90 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -638,10 +638,12 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, | |||
638 | if (err == 0) { | 638 | if (err == 0) { |
639 | dout("open_root_inode success\n"); | 639 | dout("open_root_inode success\n"); |
640 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && | 640 | if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && |
641 | fsc->sb->s_root == NULL) | 641 | fsc->sb->s_root == NULL) { |
642 | root = d_alloc_root(req->r_target_inode); | 642 | root = d_alloc_root(req->r_target_inode); |
643 | else | 643 | ceph_init_dentry(root); |
644 | } else { | ||
644 | root = d_obtain_alias(req->r_target_inode); | 645 | root = d_obtain_alias(req->r_target_inode); |
646 | } | ||
645 | req->r_target_inode = NULL; | 647 | req->r_target_inode = NULL; |
646 | dout("open_root_inode success, root dentry is %p\n", root); | 648 | dout("open_root_inode success, root dentry is %p\n", root); |
647 | } else { | 649 | } else { |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d6a972df0338..8cd4b52d4217 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, | |||
441 | smb_msg.msg_controllen = 0; | 441 | smb_msg.msg_controllen = 0; |
442 | 442 | ||
443 | for (total_read = 0; to_read; total_read += length, to_read -= length) { | 443 | for (total_read = 0; to_read; total_read += length, to_read -= length) { |
444 | try_to_freeze(); | ||
445 | |||
444 | if (server_unresponsive(server)) { | 446 | if (server_unresponsive(server)) { |
445 | total_read = -EAGAIN; | 447 | total_read = -EAGAIN; |
446 | break; | 448 | break; |
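try_to_freeze() at the top of the receive loop lets the cifsd thread cooperate with system suspend: without it, a thread blocked reading from the socket can stall the freezer. Placing it once per loop iteration, before any blocking work, is the usual pattern for long-running kernel threads.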
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cf0b1539b321..4dd9283885e7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, | |||
702 | lock->type, lock->netfid, conf_lock); | 702 | lock->type, lock->netfid, conf_lock); |
703 | } | 703 | } |
704 | 704 | ||
705 | /* | ||
706 | * Check if another lock prevents us from setting this lock (mandatory | ||
707 | * style). If such a lock exists, update the flock structure with its | ||
708 | * properties. Otherwise, set the flock type to F_UNLCK if we can cache | ||
709 | * brlocks, or leave it unchanged if we can't. Returns 0 if no request to | ||
710 | * the server is needed, or 1 otherwise. | ||
711 | */ | ||
705 | static int | 712 | static int |
706 | cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, | 713 | cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, |
707 | __u8 type, __u16 netfid, struct file_lock *flock) | 714 | __u8 type, __u16 netfid, struct file_lock *flock) |
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock) | |||
739 | mutex_unlock(&cinode->lock_mutex); | 746 | mutex_unlock(&cinode->lock_mutex); |
740 | } | 747 | } |
741 | 748 | ||
749 | /* | ||
750 | * Set the byte-range lock (mandatory style). Returns: | ||
751 | * 1) 0, if we set the lock and no server request is needed; | ||
752 | * 2) 1, if no locks prevent us but a server request is needed; | ||
753 | * 3) -EACCES, if a lock prevents us and wait is false. | ||
754 | */ | ||
742 | static int | 755 | static int |
743 | cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, | 756 | cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, |
744 | bool wait) | 757 | bool wait) |
@@ -778,6 +791,13 @@ try_again: | |||
778 | return rc; | 791 | return rc; |
779 | } | 792 | } |
780 | 793 | ||
794 | /* | ||
795 | * Check if another lock prevents us from setting this lock (posix | ||
796 | * style). If such a lock exists, update the flock structure with its | ||
797 | * properties. Otherwise, set the flock type to F_UNLCK if we can cache | ||
798 | * brlocks, or leave it unchanged if we can't. Returns 0 if no request to | ||
799 | * the server is needed, or 1 otherwise. | ||
800 | */ | ||
781 | static int | 801 | static int |
782 | cifs_posix_lock_test(struct file *file, struct file_lock *flock) | 802 | cifs_posix_lock_test(struct file *file, struct file_lock *flock) |
783 | { | 803 | { |
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock) | |||
800 | return rc; | 820 | return rc; |
801 | } | 821 | } |
802 | 822 | ||
823 | /* | ||
824 | * Set the byte-range lock (posix style). Returns: | ||
825 | * 1) 0, if we set the lock and no server request is needed; | ||
826 | * 2) 1, if a server request is needed; | ||
827 | * 3) <0, if an error occurs while setting the lock. | ||
828 | */ | ||
803 | static int | 829 | static int |
804 | cifs_posix_lock_set(struct file *file, struct file_lock *flock) | 830 | cifs_posix_lock_set(struct file *file, struct file_lock *flock) |
805 | { | 831 | { |
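A hypothetical caller illustrating the return convention the new comments document; cifs_push_lock_to_server() is an invented name standing in for whatever sends the lock request:

	rc = cifs_posix_lock_set(file, flock);
	if (rc < 0)
		return rc;		/* failed while recording the lock locally */
	if (rc == 1)
		rc = cifs_push_lock_to_server(file, flock);	/* hypothetical helper */
	return rc;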
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 5de03ec20144..a090bbe6ee29 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, | |||
554 | rc); | 554 | rc); |
555 | return rc; | 555 | return rc; |
556 | } | 556 | } |
557 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); | 557 | /* FindFirst/Next set last_entry to NULL on malformed reply */ |
558 | if (cifsFile->srch_inf.last_entry) | ||
559 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, | ||
560 | cifsFile); | ||
558 | } | 561 | } |
559 | 562 | ||
560 | while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && | 563 | while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && |
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, | |||
562 | cFYI(1, "calling findnext2"); | 565 | cFYI(1, "calling findnext2"); |
563 | rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, | 566 | rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, |
564 | &cifsFile->srch_inf); | 567 | &cifsFile->srch_inf); |
565 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); | 568 | /* FindFirst/Next set last_entry to NULL on malformed reply */ |
569 | if (cifsFile->srch_inf.last_entry) | ||
570 | cifs_save_resume_key(cifsFile->srch_inf.last_entry, | ||
571 | cifsFile); | ||
566 | if (rc) | 572 | if (rc) |
567 | return -ENOENT; | 573 | return -ENOENT; |
568 | } | 574 | } |
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 7cacba12b8f1..80d850881938 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c | |||
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, | |||
209 | { | 209 | { |
210 | int rc; | 210 | int rc; |
211 | int len; | 211 | int len; |
212 | __u16 wpwd[129]; | 212 | __le16 wpwd[129]; |
213 | 213 | ||
214 | /* Password cannot be longer than 128 characters */ | 214 | /* Password cannot be longer than 128 characters */ |
215 | if (passwd) /* Password must be converted to NT unicode */ | 215 | if (passwd) /* Password must be converted to NT unicode */ |
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, | |||
219 | *wpwd = 0; /* Ensure string is null terminated */ | 219 | *wpwd = 0; /* Ensure string is null terminated */ |
220 | } | 220 | } |
221 | 221 | ||
222 | rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16)); | 222 | rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); |
223 | memset(wpwd, 0, 129 * sizeof(__u16)); | 223 | memset(wpwd, 0, 129 * sizeof(__le16)); |
224 | 224 | ||
225 | return rc; | 225 | return rc; |
226 | } | 226 | } |
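The switch from __u16 to __le16 matters on big-endian hosts: the NT password hash is defined as MD4 over the UTF-16LE encoding of the password, so wpwd must hold explicitly little-endian units. Assuming the conversion step in the callers looks roughly like this (a sketch, not the actual cifs helper):

	__le16 wpwd[129];
	int i;

	/* store each code point little-endian regardless of host byte order */
	for (i = 0; i < len; i++)
		wpwd[i] = cpu_to_le16(host_order_password[i]);	/* illustrative source */

	rc = mdfour(p16, (unsigned char *)wpwd, len * sizeof(__le16));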
diff --git a/fs/dcache.c b/fs/dcache.c index a901c6901bce..89509b5a090e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/bit_spinlock.h> | 36 | #include <linux/bit_spinlock.h> |
37 | #include <linux/rculist_bl.h> | 37 | #include <linux/rculist_bl.h> |
38 | #include <linux/prefetch.h> | 38 | #include <linux/prefetch.h> |
39 | #include <linux/ratelimit.h> | ||
39 | #include "internal.h" | 40 | #include "internal.h" |
40 | 41 | ||
41 | /* | 42 | /* |
@@ -2383,8 +2384,16 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2383 | actual = __d_unalias(inode, dentry, alias); | 2384 | actual = __d_unalias(inode, dentry, alias); |
2384 | } | 2385 | } |
2385 | write_sequnlock(&rename_lock); | 2386 | write_sequnlock(&rename_lock); |
2386 | if (IS_ERR(actual)) | 2387 | if (IS_ERR(actual)) { |
2388 | if (PTR_ERR(actual) == -ELOOP) | ||
2389 | pr_warn_ratelimited( | ||
2390 | "VFS: Lookup of '%s' in %s %s" | ||
2391 | " would have caused loop\n", | ||
2392 | dentry->d_name.name, | ||
2393 | inode->i_sb->s_type->name, | ||
2394 | inode->i_sb->s_id); | ||
2387 | dput(alias); | 2395 | dput(alias); |
2396 | } | ||
2388 | goto out_nolock; | 2397 | goto out_nolock; |
2389 | } | 2398 | } |
2390 | } | 2399 | } |
@@ -2430,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) | |||
2430 | /** | 2439 | /** |
2431 | * prepend_path - Prepend path string to a buffer | 2440 | * prepend_path - Prepend path string to a buffer |
2432 | * @path: the dentry/vfsmount to report | 2441 | * @path: the dentry/vfsmount to report |
2433 | * @root: root vfsmnt/dentry (may be modified by this function) | 2442 | * @root: root vfsmnt/dentry |
2434 | * @buffer: pointer to the end of the buffer | 2443 | * @buffer: pointer to the end of the buffer |
2435 | * @buflen: pointer to buffer length | 2444 | * @buflen: pointer to buffer length |
2436 | * | 2445 | * |
2437 | * Caller holds the rename_lock. | 2446 | * Caller holds the rename_lock. |
2438 | * | ||
2439 | * If path is not reachable from the supplied root, then the value of | ||
2440 | * root is changed (without modifying refcounts). | ||
2441 | */ | 2447 | */ |
2442 | static int prepend_path(const struct path *path, struct path *root, | 2448 | static int prepend_path(const struct path *path, |
2449 | const struct path *root, | ||
2443 | char **buffer, int *buflen) | 2450 | char **buffer, int *buflen) |
2444 | { | 2451 | { |
2445 | struct dentry *dentry = path->dentry; | 2452 | struct dentry *dentry = path->dentry; |
@@ -2474,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root, | |||
2474 | dentry = parent; | 2481 | dentry = parent; |
2475 | } | 2482 | } |
2476 | 2483 | ||
2477 | out: | ||
2478 | if (!error && !slash) | 2484 | if (!error && !slash) |
2479 | error = prepend(buffer, buflen, "/", 1); | 2485 | error = prepend(buffer, buflen, "/", 1); |
2480 | 2486 | ||
2487 | out: | ||
2481 | br_read_unlock(vfsmount_lock); | 2488 | br_read_unlock(vfsmount_lock); |
2482 | return error; | 2489 | return error; |
2483 | 2490 | ||
@@ -2491,15 +2498,17 @@ global_root: | |||
2491 | WARN(1, "Root dentry has weird name <%.*s>\n", | 2498 | WARN(1, "Root dentry has weird name <%.*s>\n", |
2492 | (int) dentry->d_name.len, dentry->d_name.name); | 2499 | (int) dentry->d_name.len, dentry->d_name.name); |
2493 | } | 2500 | } |
2494 | root->mnt = vfsmnt; | 2501 | if (!slash) |
2495 | root->dentry = dentry; | 2502 | error = prepend(buffer, buflen, "/", 1); |
2503 | if (!error) | ||
2504 | error = vfsmnt->mnt_ns ? 1 : 2; | ||
2496 | goto out; | 2505 | goto out; |
2497 | } | 2506 | } |
2498 | 2507 | ||
2499 | /** | 2508 | /** |
2500 | * __d_path - return the path of a dentry | 2509 | * __d_path - return the path of a dentry |
2501 | * @path: the dentry/vfsmount to report | 2510 | * @path: the dentry/vfsmount to report |
2502 | * @root: root vfsmnt/dentry (may be modified by this function) | 2511 | * @root: root vfsmnt/dentry |
2503 | * @buf: buffer to return value in | 2512 | * @buf: buffer to return value in |
2504 | * @buflen: buffer length | 2513 | * @buflen: buffer length |
2505 | * | 2514 | * |
@@ -2510,10 +2519,10 @@ global_root: | |||
2510 | * | 2519 | * |
2511 | * "buflen" should be positive. | 2520 | * "buflen" should be positive. |
2512 | * | 2521 | * |
2513 | * If path is not reachable from the supplied root, then the value of | 2522 | * If the path is not reachable from the supplied root, return %NULL. |
2514 | * root is changed (without modifying refcounts). | ||
2515 | */ | 2523 | */ |
2516 | char *__d_path(const struct path *path, struct path *root, | 2524 | char *__d_path(const struct path *path, |
2525 | const struct path *root, | ||
2517 | char *buf, int buflen) | 2526 | char *buf, int buflen) |
2518 | { | 2527 | { |
2519 | char *res = buf + buflen; | 2528 | char *res = buf + buflen; |
@@ -2524,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root, | |||
2524 | error = prepend_path(path, root, &res, &buflen); | 2533 | error = prepend_path(path, root, &res, &buflen); |
2525 | write_sequnlock(&rename_lock); | 2534 | write_sequnlock(&rename_lock); |
2526 | 2535 | ||
2527 | if (error) | 2536 | if (error < 0) |
2537 | return ERR_PTR(error); | ||
2538 | if (error > 0) | ||
2539 | return NULL; | ||
2540 | return res; | ||
2541 | } | ||
2542 | |||
2543 | char *d_absolute_path(const struct path *path, | ||
2544 | char *buf, int buflen) | ||
2545 | { | ||
2546 | struct path root = {}; | ||
2547 | char *res = buf + buflen; | ||
2548 | int error; | ||
2549 | |||
2550 | prepend(&res, &buflen, "\0", 1); | ||
2551 | write_seqlock(&rename_lock); | ||
2552 | error = prepend_path(path, &root, &res, &buflen); | ||
2553 | write_sequnlock(&rename_lock); | ||
2554 | |||
2555 | if (error > 1) | ||
2556 | error = -EINVAL; | ||
2557 | if (error < 0) | ||
2528 | return ERR_PTR(error); | 2558 | return ERR_PTR(error); |
2529 | return res; | 2559 | return res; |
2530 | } | 2560 | } |
@@ -2532,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root, | |||
2532 | /* | 2562 | /* |
2533 | * same as __d_path but appends "(deleted)" for unlinked files. | 2563 | * same as __d_path but appends "(deleted)" for unlinked files. |
2534 | */ | 2564 | */ |
2535 | static int path_with_deleted(const struct path *path, struct path *root, | 2565 | static int path_with_deleted(const struct path *path, |
2536 | char **buf, int *buflen) | 2566 | const struct path *root, |
2567 | char **buf, int *buflen) | ||
2537 | { | 2568 | { |
2538 | prepend(buf, buflen, "\0", 1); | 2569 | prepend(buf, buflen, "\0", 1); |
2539 | if (d_unlinked(path->dentry)) { | 2570 | if (d_unlinked(path->dentry)) { |
@@ -2570,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen) | |||
2570 | { | 2601 | { |
2571 | char *res = buf + buflen; | 2602 | char *res = buf + buflen; |
2572 | struct path root; | 2603 | struct path root; |
2573 | struct path tmp; | ||
2574 | int error; | 2604 | int error; |
2575 | 2605 | ||
2576 | /* | 2606 | /* |
@@ -2585,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen) | |||
2585 | 2615 | ||
2586 | get_fs_root(current->fs, &root); | 2616 | get_fs_root(current->fs, &root); |
2587 | write_seqlock(&rename_lock); | 2617 | write_seqlock(&rename_lock); |
2588 | tmp = root; | 2618 | error = path_with_deleted(path, &root, &res, &buflen); |
2589 | error = path_with_deleted(path, &tmp, &res, &buflen); | 2619 | if (error < 0) |
2590 | if (error) | ||
2591 | res = ERR_PTR(error); | 2620 | res = ERR_PTR(error); |
2592 | write_sequnlock(&rename_lock); | 2621 | write_sequnlock(&rename_lock); |
2593 | path_put(&root); | 2622 | path_put(&root); |
@@ -2608,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) | |||
2608 | { | 2637 | { |
2609 | char *res = buf + buflen; | 2638 | char *res = buf + buflen; |
2610 | struct path root; | 2639 | struct path root; |
2611 | struct path tmp; | ||
2612 | int error; | 2640 | int error; |
2613 | 2641 | ||
2614 | if (path->dentry->d_op && path->dentry->d_op->d_dname) | 2642 | if (path->dentry->d_op && path->dentry->d_op->d_dname) |
@@ -2616,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) | |||
2616 | 2644 | ||
2617 | get_fs_root(current->fs, &root); | 2645 | get_fs_root(current->fs, &root); |
2618 | write_seqlock(&rename_lock); | 2646 | write_seqlock(&rename_lock); |
2619 | tmp = root; | 2647 | error = path_with_deleted(path, &root, &res, &buflen); |
2620 | error = path_with_deleted(path, &tmp, &res, &buflen); | 2648 | if (error > 0) |
2621 | if (!error && !path_equal(&tmp, &root)) | ||
2622 | error = prepend_unreachable(&res, &buflen); | 2649 | error = prepend_unreachable(&res, &buflen); |
2623 | write_sequnlock(&rename_lock); | 2650 | write_sequnlock(&rename_lock); |
2624 | path_put(&root); | 2651 | path_put(&root); |
@@ -2749,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) | |||
2749 | write_seqlock(&rename_lock); | 2776 | write_seqlock(&rename_lock); |
2750 | if (!d_unlinked(pwd.dentry)) { | 2777 | if (!d_unlinked(pwd.dentry)) { |
2751 | unsigned long len; | 2778 | unsigned long len; |
2752 | struct path tmp = root; | ||
2753 | char *cwd = page + PAGE_SIZE; | 2779 | char *cwd = page + PAGE_SIZE; |
2754 | int buflen = PAGE_SIZE; | 2780 | int buflen = PAGE_SIZE; |
2755 | 2781 | ||
2756 | prepend(&cwd, &buflen, "\0", 1); | 2782 | prepend(&cwd, &buflen, "\0", 1); |
2757 | error = prepend_path(&pwd, &tmp, &cwd, &buflen); | 2783 | error = prepend_path(&pwd, &root, &cwd, &buflen); |
2758 | write_sequnlock(&rename_lock); | 2784 | write_sequnlock(&rename_lock); |
2759 | 2785 | ||
2760 | if (error) | 2786 | if (error < 0) |
2761 | goto out; | 2787 | goto out; |
2762 | 2788 | ||
2763 | /* Unreachable from current root */ | 2789 | /* Unreachable from current root */ |
2764 | if (!path_equal(&tmp, &root)) { | 2790 | if (error > 0) { |
2765 | error = prepend_unreachable(&cwd, &buflen); | 2791 | error = prepend_unreachable(&cwd, &buflen); |
2766 | if (error) | 2792 | if (error) |
2767 | goto out; | 2793 | goto out; |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 58609bde3b9f..2a834255c75d 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals( | |||
967 | 967 | ||
968 | /** | 968 | /** |
969 | * ecryptfs_new_file_context | 969 | * ecryptfs_new_file_context |
970 | * @ecryptfs_dentry: The eCryptfs dentry | 970 | * @ecryptfs_inode: The eCryptfs inode |
971 | * | 971 | * |
972 | * If the crypto context for the file has not yet been established, | 972 | * If the crypto context for the file has not yet been established, |
973 | * this is where we do that. Establishing a new crypto context | 973 | * this is where we do that. Establishing a new crypto context |
@@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals( | |||
984 | * | 984 | * |
985 | * Returns zero on success; non-zero otherwise | 985 | * Returns zero on success; non-zero otherwise |
986 | */ | 986 | */ |
987 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry) | 987 | int ecryptfs_new_file_context(struct inode *ecryptfs_inode) |
988 | { | 988 | { |
989 | struct ecryptfs_crypt_stat *crypt_stat = | 989 | struct ecryptfs_crypt_stat *crypt_stat = |
990 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 990 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
991 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | 991 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = |
992 | &ecryptfs_superblock_to_private( | 992 | &ecryptfs_superblock_to_private( |
993 | ecryptfs_dentry->d_sb)->mount_crypt_stat; | 993 | ecryptfs_inode->i_sb)->mount_crypt_stat; |
994 | int cipher_name_len; | 994 | int cipher_name_len; |
995 | int rc = 0; | 995 | int rc = 0; |
996 | 996 | ||
@@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, | |||
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | static int | 1301 | static int |
1302 | ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry, | 1302 | ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode, |
1303 | char *virt, size_t virt_len) | 1303 | char *virt, size_t virt_len) |
1304 | { | 1304 | { |
1305 | int rc; | 1305 | int rc; |
1306 | 1306 | ||
1307 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, | 1307 | rc = ecryptfs_write_lower(ecryptfs_inode, virt, |
1308 | 0, virt_len); | 1308 | 0, virt_len); |
1309 | if (rc < 0) | 1309 | if (rc < 0) |
1310 | printk(KERN_ERR "%s: Error attempting to write header " | 1310 | printk(KERN_ERR "%s: Error attempting to write header " |
@@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, | |||
1338 | 1338 | ||
1339 | /** | 1339 | /** |
1340 | * ecryptfs_write_metadata | 1340 | * ecryptfs_write_metadata |
1341 | * @ecryptfs_dentry: The eCryptfs dentry | 1341 | * @ecryptfs_dentry: The eCryptfs dentry, which should be negative |
1342 | * @ecryptfs_inode: The newly created eCryptfs inode | ||
1342 | * | 1343 | * |
1343 | * Write the file headers out. This will likely involve a userspace | 1344 | * Write the file headers out. This will likely involve a userspace |
1344 | * callout, in which the session key is encrypted with one or more | 1345 | * callout, in which the session key is encrypted with one or more |
@@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, | |||
1348 | * | 1349 | * |
1349 | * Returns zero on success; non-zero on error | 1350 | * Returns zero on success; non-zero on error |
1350 | */ | 1351 | */ |
1351 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | 1352 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, |
1353 | struct inode *ecryptfs_inode) | ||
1352 | { | 1354 | { |
1353 | struct ecryptfs_crypt_stat *crypt_stat = | 1355 | struct ecryptfs_crypt_stat *crypt_stat = |
1354 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 1356 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
1355 | unsigned int order; | 1357 | unsigned int order; |
1356 | char *virt; | 1358 | char *virt; |
1357 | size_t virt_len; | 1359 | size_t virt_len; |
@@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1391 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, | 1393 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt, |
1392 | size); | 1394 | size); |
1393 | else | 1395 | else |
1394 | rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt, | 1396 | rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt, |
1395 | virt_len); | 1397 | virt_len); |
1396 | if (rc) { | 1398 | if (rc) { |
1397 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " | 1399 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " |
@@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD" | |||
1943 | 1945 | ||
1944 | /* We could either offset on every reverse map or just pad some 0x00's | 1946 | /* We could either offset on every reverse map or just pad some 0x00's |
1945 | * at the front here */ | 1947 | * at the front here */ |
1946 | static const unsigned char filename_rev_map[] = { | 1948 | static const unsigned char filename_rev_map[256] = { |
1947 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ | 1949 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ |
1948 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ | 1950 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ |
1949 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ | 1951 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ |
@@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = { | |||
1959 | 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ | 1961 | 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */ |
1960 | 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ | 1962 | 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */ |
1961 | 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ | 1963 | 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */ |
1962 | 0x3D, 0x3E, 0x3F | 1964 | 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */ |
1963 | }; | 1965 | }; |
1964 | 1966 | ||
1965 | /** | 1967 | /** |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 54481a3b2c79..a9f29b12fbf2 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat); | |||
584 | int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); | 584 | int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode); |
585 | int ecryptfs_encrypt_page(struct page *page); | 585 | int ecryptfs_encrypt_page(struct page *page); |
586 | int ecryptfs_decrypt_page(struct page *page); | 586 | int ecryptfs_decrypt_page(struct page *page); |
587 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); | 587 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry, |
588 | struct inode *ecryptfs_inode); | ||
588 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); | 589 | int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); |
589 | int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); | 590 | int ecryptfs_new_file_context(struct inode *ecryptfs_inode); |
590 | void ecryptfs_write_crypt_stat_flags(char *page_virt, | 591 | void ecryptfs_write_crypt_stat_flags(char *page_virt, |
591 | struct ecryptfs_crypt_stat *crypt_stat, | 592 | struct ecryptfs_crypt_stat *crypt_stat, |
592 | size_t *written); | 593 | size_t *written); |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c6ac98cf9baa..d3f95f941c47 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -139,6 +139,27 @@ out: | |||
139 | return rc; | 139 | return rc; |
140 | } | 140 | } |
141 | 141 | ||
142 | static void ecryptfs_vma_close(struct vm_area_struct *vma) | ||
143 | { | ||
144 | filemap_write_and_wait(vma->vm_file->f_mapping); | ||
145 | } | ||
146 | |||
147 | static const struct vm_operations_struct ecryptfs_file_vm_ops = { | ||
148 | .close = ecryptfs_vma_close, | ||
149 | .fault = filemap_fault, | ||
150 | }; | ||
151 | |||
152 | static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma) | ||
153 | { | ||
154 | int rc; | ||
155 | |||
156 | rc = generic_file_mmap(file, vma); | ||
157 | if (!rc) | ||
158 | vma->vm_ops = &ecryptfs_file_vm_ops; | ||
159 | |||
160 | return rc; | ||
161 | } | ||
162 | |||
142 | struct kmem_cache *ecryptfs_file_info_cache; | 163 | struct kmem_cache *ecryptfs_file_info_cache; |
143 | 164 | ||
144 | /** | 165 | /** |
@@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = { | |||
349 | #ifdef CONFIG_COMPAT | 370 | #ifdef CONFIG_COMPAT |
350 | .compat_ioctl = ecryptfs_compat_ioctl, | 371 | .compat_ioctl = ecryptfs_compat_ioctl, |
351 | #endif | 372 | #endif |
352 | .mmap = generic_file_mmap, | 373 | .mmap = ecryptfs_file_mmap, |
353 | .open = ecryptfs_open, | 374 | .open = ecryptfs_open, |
354 | .flush = ecryptfs_flush, | 375 | .flush = ecryptfs_flush, |
355 | .release = ecryptfs_release, | 376 | .release = ecryptfs_release, |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index a36d327f1521..32f90a3ae63e 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, | |||
172 | * it. It will also update the eCryptfs directory inode to mimic the | 172 | * it. It will also update the eCryptfs directory inode to mimic the |
173 | * stat of the lower directory inode. | 173 | * stat of the lower directory inode. |
174 | * | 174 | * |
175 | * Returns zero on success; non-zero on error condition | 175 | * Returns the new eCryptfs inode on success; an ERR_PTR on error condition |
176 | */ | 176 | */ |
177 | static int | 177 | static struct inode * |
178 | ecryptfs_do_create(struct inode *directory_inode, | 178 | ecryptfs_do_create(struct inode *directory_inode, |
179 | struct dentry *ecryptfs_dentry, int mode) | 179 | struct dentry *ecryptfs_dentry, int mode) |
180 | { | 180 | { |
181 | int rc; | 181 | int rc; |
182 | struct dentry *lower_dentry; | 182 | struct dentry *lower_dentry; |
183 | struct dentry *lower_dir_dentry; | 183 | struct dentry *lower_dir_dentry; |
184 | struct inode *inode; | ||
184 | 185 | ||
185 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); | 186 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); |
186 | lower_dir_dentry = lock_parent(lower_dentry); | 187 | lower_dir_dentry = lock_parent(lower_dentry); |
187 | if (IS_ERR(lower_dir_dentry)) { | 188 | if (IS_ERR(lower_dir_dentry)) { |
188 | ecryptfs_printk(KERN_ERR, "Error locking directory of " | 189 | ecryptfs_printk(KERN_ERR, "Error locking directory of " |
189 | "dentry\n"); | 190 | "dentry\n"); |
190 | rc = PTR_ERR(lower_dir_dentry); | 191 | inode = ERR_CAST(lower_dir_dentry); |
191 | goto out; | 192 | goto out; |
192 | } | 193 | } |
193 | rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, | 194 | rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, |
@@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode, | |||
195 | if (rc) { | 196 | if (rc) { |
196 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " | 197 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " |
197 | "rc = [%d]\n", __func__, rc); | 198 | "rc = [%d]\n", __func__, rc); |
199 | inode = ERR_PTR(rc); | ||
198 | goto out_lock; | 200 | goto out_lock; |
199 | } | 201 | } |
200 | rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, | 202 | inode = __ecryptfs_get_inode(lower_dentry->d_inode, |
201 | directory_inode->i_sb); | 203 | directory_inode->i_sb); |
202 | if (rc) { | 204 | if (IS_ERR(inode)) |
203 | ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n"); | ||
204 | goto out_lock; | 205 | goto out_lock; |
205 | } | ||
206 | fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); | 206 | fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); |
207 | fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); | 207 | fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); |
208 | out_lock: | 208 | out_lock: |
209 | unlock_dir(lower_dir_dentry); | 209 | unlock_dir(lower_dir_dentry); |
210 | out: | 210 | out: |
211 | return rc; | 211 | return inode; |
212 | } | 212 | } |
213 | 213 | ||
214 | /** | 214 | /** |
@@ -219,26 +219,26 @@ out: | |||
219 | * | 219 | * |
220 | * Returns zero on success | 220 | * Returns zero on success |
221 | */ | 221 | */ |
222 | static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | 222 | static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, |
223 | struct inode *ecryptfs_inode) | ||
223 | { | 224 | { |
224 | struct ecryptfs_crypt_stat *crypt_stat = | 225 | struct ecryptfs_crypt_stat *crypt_stat = |
225 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 226 | &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; |
226 | int rc = 0; | 227 | int rc = 0; |
227 | 228 | ||
228 | if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { | 229 | if (S_ISDIR(ecryptfs_inode->i_mode)) { |
229 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); | 230 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); |
230 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); | 231 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); |
231 | goto out; | 232 | goto out; |
232 | } | 233 | } |
233 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); | 234 | ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); |
234 | rc = ecryptfs_new_file_context(ecryptfs_dentry); | 235 | rc = ecryptfs_new_file_context(ecryptfs_inode); |
235 | if (rc) { | 236 | if (rc) { |
236 | ecryptfs_printk(KERN_ERR, "Error creating new file " | 237 | ecryptfs_printk(KERN_ERR, "Error creating new file " |
237 | "context; rc = [%d]\n", rc); | 238 | "context; rc = [%d]\n", rc); |
238 | goto out; | 239 | goto out; |
239 | } | 240 | } |
240 | rc = ecryptfs_get_lower_file(ecryptfs_dentry, | 241 | rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode); |
241 | ecryptfs_dentry->d_inode); | ||
242 | if (rc) { | 242 | if (rc) { |
243 | printk(KERN_ERR "%s: Error attempting to initialize " | 243 | printk(KERN_ERR "%s: Error attempting to initialize " |
244 | "the lower file for the dentry with name " | 244 | "the lower file for the dentry with name " |
@@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) | |||
246 | ecryptfs_dentry->d_name.name, rc); | 246 | ecryptfs_dentry->d_name.name, rc); |
247 | goto out; | 247 | goto out; |
248 | } | 248 | } |
249 | rc = ecryptfs_write_metadata(ecryptfs_dentry); | 249 | rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode); |
250 | if (rc) | 250 | if (rc) |
251 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); | 251 | printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); |
252 | ecryptfs_put_lower_file(ecryptfs_dentry->d_inode); | 252 | ecryptfs_put_lower_file(ecryptfs_inode); |
253 | out: | 253 | out: |
254 | return rc; | 254 | return rc; |
255 | } | 255 | } |
@@ -269,18 +269,28 @@ static int | |||
269 | ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, | 269 | ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, |
270 | int mode, struct nameidata *nd) | 270 | int mode, struct nameidata *nd) |
271 | { | 271 | { |
272 | struct inode *ecryptfs_inode; | ||
272 | int rc; | 273 | int rc; |
273 | 274 | ||
274 | /* ecryptfs_do_create() calls ecryptfs_interpose() */ | 275 | ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry, |
275 | rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); | 276 | mode); |
276 | if (unlikely(rc)) { | 277 | if (unlikely(IS_ERR(ecryptfs_inode))) { |
277 | ecryptfs_printk(KERN_WARNING, "Failed to create file in" | 278 | ecryptfs_printk(KERN_WARNING, "Failed to create file in" |
278 | "lower filesystem\n"); | 279 | "lower filesystem\n"); |
280 | rc = PTR_ERR(ecryptfs_inode); | ||
279 | goto out; | 281 | goto out; |
280 | } | 282 | } |
281 | /* At this point, a file exists on "disk"; we need to make sure | 283 | /* At this point, a file exists on "disk"; we need to make sure |
282 | * that this on disk file is prepared to be an ecryptfs file */ | 284 | * that this on disk file is prepared to be an ecryptfs file */ |
283 | rc = ecryptfs_initialize_file(ecryptfs_dentry); | 285 | rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); |
286 | if (rc) { | ||
287 | drop_nlink(ecryptfs_inode); | ||
288 | unlock_new_inode(ecryptfs_inode); | ||
289 | iput(ecryptfs_inode); | ||
290 | goto out; | ||
291 | } | ||
292 | d_instantiate(ecryptfs_dentry, ecryptfs_inode); | ||
293 | unlock_new_inode(ecryptfs_inode); | ||
284 | out: | 294 | out: |
285 | return rc; | 295 | return rc; |
286 | } | 296 | } |
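The rewritten create path follows the common VFS shape: obtain the new inode, finish filesystem-specific setup, and only then d_instantiate(); on failure the half-built inode is torn down rather than left attached to a dentry. A generic sketch of that ordering, with illustrative function names:

	inode = myfs_do_create(dir, dentry, mode);	/* illustrative */
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	err = myfs_initialize_file(dentry, inode);	/* write headers etc. */
	if (err) {
		drop_nlink(inode);		/* undo the link taken at create */
		unlock_new_inode(inode);	/* clear I_NEW before the final put */
		iput(inode);
		return err;
	}
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;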
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index f6dba4505f1c..12ccacda44e0 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -565,7 +565,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) | |||
565 | brelse(bitmap_bh); | 565 | brelse(bitmap_bh); |
566 | printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" | 566 | printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" |
567 | ", computed = %llu, %llu\n", | 567 | ", computed = %llu, %llu\n", |
568 | EXT4_B2C(sbi, ext4_free_blocks_count(es)), | 568 | EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), |
569 | desc_count, bitmap_count); | 569 | desc_count, bitmap_count); |
570 | return bitmap_count; | 570 | return bitmap_count; |
571 | #else | 571 | #else |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 240f6e2dc7ee..848f436df29f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -2270,6 +2270,7 @@ retry: | |||
2270 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " | 2270 | ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " |
2271 | "%ld pages, ino %lu; err %d", __func__, | 2271 | "%ld pages, ino %lu; err %d", __func__, |
2272 | wbc->nr_to_write, inode->i_ino, ret); | 2272 | wbc->nr_to_write, inode->i_ino, ret); |
2273 | blk_finish_plug(&plug); | ||
2273 | goto out_writepages; | 2274 | goto out_writepages; |
2274 | } | 2275 | } |
2275 | 2276 | ||
@@ -2806,8 +2807,8 @@ out: | |||
2806 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 2807 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
2807 | 2808 | ||
2808 | /* queue the work to convert unwritten extents to written */ | 2809 | /* queue the work to convert unwritten extents to written */ |
2809 | queue_work(wq, &io_end->work); | ||
2810 | iocb->private = NULL; | 2810 | iocb->private = NULL; |
2811 | queue_work(wq, &io_end->work); | ||
2811 | 2812 | ||
2812 | /* XXX: probably should move into the real I/O completion handler */ | 2813 | /* XXX: probably should move into the real I/O completion handler */ |
2813 | inode_dio_done(inode); | 2814 | inode_dio_done(inode); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9953d80145ad..3858767ec672 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1683,7 +1683,9 @@ static int parse_options(char *options, struct super_block *sb, | |||
1683 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; | 1683 | data_opt = EXT4_MOUNT_WRITEBACK_DATA; |
1684 | datacheck: | 1684 | datacheck: |
1685 | if (is_remount) { | 1685 | if (is_remount) { |
1686 | if (test_opt(sb, DATA_FLAGS) != data_opt) { | 1686 | if (!sbi->s_journal) |
1687 | ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); | ||
1688 | else if (test_opt(sb, DATA_FLAGS) != data_opt) { | ||
1687 | ext4_msg(sb, KERN_ERR, | 1689 | ext4_msg(sb, KERN_ERR, |
1688 | "Cannot change data mode on remount"); | 1690 | "Cannot change data mode on remount"); |
1689 | return 0; | 1691 | return 0; |
@@ -3099,8 +3101,6 @@ static void ext4_destroy_lazyinit_thread(void) | |||
3099 | } | 3101 | } |
3100 | 3102 | ||
3101 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) | 3103 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
3102 | __releases(kernel_lock) | ||
3103 | __acquires(kernel_lock) | ||
3104 | { | 3104 | { |
3105 | char *orig_data = kstrdup(data, GFP_KERNEL); | 3105 | char *orig_data = kstrdup(data, GFP_KERNEL); |
3106 | struct buffer_head *bh; | 3106 | struct buffer_head *bh; |
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c index e673a88b8ae7..b1ce4c7ad3fb 100644 --- a/fs/hfs/trans.c +++ b/fs/hfs/trans.c | |||
@@ -40,6 +40,8 @@ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in) | |||
40 | 40 | ||
41 | src = in->name; | 41 | src = in->name; |
42 | srclen = in->len; | 42 | srclen = in->len; |
43 | if (srclen > HFS_NAMELEN) | ||
44 | srclen = HFS_NAMELEN; | ||
43 | dst = out; | 45 | dst = out; |
44 | dstlen = HFS_MAX_NAMELEN; | 46 | dstlen = HFS_MAX_NAMELEN; |
45 | if (nls_io) { | 47 | if (nls_io) { |
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c index 3f32bcb0d9bd..ef175cb8cfd8 100644 --- a/fs/minix/bitmap.c +++ b/fs/minix/bitmap.c | |||
@@ -16,38 +16,26 @@ | |||
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | 18 | ||
19 | static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 }; | ||
20 | |||
21 | static DEFINE_SPINLOCK(bitmap_lock); | 19 | static DEFINE_SPINLOCK(bitmap_lock); |
22 | 20 | ||
23 | static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) | 21 | /* |
22 | * bitmap consists of blocks filled with 16-bit words | ||
23 | * bit set == busy, bit clear == free | ||
24 | * endianness is a mess, but for counting zero bits it really doesn't matter... | ||
25 | */ | ||
26 | static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits) | ||
24 | { | 27 | { |
25 | unsigned i, j, sum = 0; | 28 | __u32 sum = 0; |
26 | struct buffer_head *bh; | 29 | unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8); |
27 | |||
28 | for (i=0; i<numblocks-1; i++) { | ||
29 | if (!(bh=map[i])) | ||
30 | return(0); | ||
31 | for (j=0; j<bh->b_size; j++) | ||
32 | sum += nibblemap[bh->b_data[j] & 0xf] | ||
33 | + nibblemap[(bh->b_data[j]>>4) & 0xf]; | ||
34 | } | ||
35 | 30 | ||
36 | if (numblocks==0 || !(bh=map[numblocks-1])) | 31 | while (blocks--) { |
37 | return(0); | 32 | unsigned words = blocksize / 2; |
38 | i = ((numbits - (numblocks-1) * bh->b_size * 8) / 16) * 2; | 33 | __u16 *p = (__u16 *)(*map++)->b_data; |
39 | for (j=0; j<i; j++) { | 34 | while (words--) |
40 | sum += nibblemap[bh->b_data[j] & 0xf] | 35 | sum += 16 - hweight16(*p++); |
41 | + nibblemap[(bh->b_data[j]>>4) & 0xf]; | ||
42 | } | 36 | } |
43 | 37 | ||
44 | i = numbits%16; | 38 | return sum; |
45 | if (i!=0) { | ||
46 | i = *(__u16 *)(&bh->b_data[j]) | ~((1<<i) - 1); | ||
47 | sum += nibblemap[i & 0xf] + nibblemap[(i>>4) & 0xf]; | ||
48 | sum += nibblemap[(i>>8) & 0xf] + nibblemap[(i>>12) & 0xf]; | ||
49 | } | ||
50 | return(sum); | ||
51 | } | 39 | } |
52 | 40 | ||
53 | void minix_free_block(struct inode *inode, unsigned long block) | 41 | void minix_free_block(struct inode *inode, unsigned long block) |
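The rewritten counter leans on hweight16(): every 16-bit word contributes 16 minus its population count of set (busy) bits, and the result is byte-order independent because swapping the bytes of a word does not change how many bits are set. A quick check of the arithmetic:

	#include <linux/bitops.h>

	static __u32 free_bits_in_word(__u16 w)
	{
		return 16 - hweight16(w);	/* clear bits == free entries */
	}
	/* free_bits_in_word(0x00FF) == 8, free_bits_in_word(0xFFFF) == 0 */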
@@ -105,10 +93,12 @@ int minix_new_block(struct inode * inode) | |||
105 | return 0; | 93 | return 0; |
106 | } | 94 | } |
107 | 95 | ||
108 | unsigned long minix_count_free_blocks(struct minix_sb_info *sbi) | 96 | unsigned long minix_count_free_blocks(struct super_block *sb) |
109 | { | 97 | { |
110 | return (count_free(sbi->s_zmap, sbi->s_zmap_blocks, | 98 | struct minix_sb_info *sbi = minix_sb(sb); |
111 | sbi->s_nzones - sbi->s_firstdatazone + 1) | 99 | u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1); |
100 | |||
101 | return (count_free(sbi->s_zmap, sb->s_blocksize, bits) | ||
112 | << sbi->s_log_zone_size); | 102 | << sbi->s_log_zone_size); |
113 | } | 103 | } |
114 | 104 | ||
@@ -273,7 +263,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error) | |||
273 | return inode; | 263 | return inode; |
274 | } | 264 | } |
275 | 265 | ||
276 | unsigned long minix_count_free_inodes(struct minix_sb_info *sbi) | 266 | unsigned long minix_count_free_inodes(struct super_block *sb) |
277 | { | 267 | { |
278 | return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1); | 268 | struct minix_sb_info *sbi = minix_sb(sb); |
269 | u32 bits = sbi->s_ninodes + 1; | ||
270 | |||
271 | return count_free(sbi->s_imap, sb->s_blocksize, bits); | ||
279 | } | 272 | } |
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 64cdcd662ffc..1d9e33966db0 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
279 | else if (sbi->s_mount_state & MINIX_ERROR_FS) | 279 | else if (sbi->s_mount_state & MINIX_ERROR_FS) |
280 | printk("MINIX-fs: mounting file system with errors, " | 280 | printk("MINIX-fs: mounting file system with errors, " |
281 | "running fsck is recommended\n"); | 281 | "running fsck is recommended\n"); |
282 | |||
283 | /* Apparently minix can create filesystems that allocate more blocks for | ||
284 | * the bitmaps than needed. We simply ignore that, but verify it didn't | ||
285 | * create one with too few blocks, and bail out if so. | ||
286 | */ | ||
287 | block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); | ||
288 | if (sbi->s_imap_blocks < block) { | ||
289 | printk("MINIX-fs: file system does not have enough " | ||
290 | "imap blocks allocated. Refusing to mount\n"); | ||
291 | goto out_iput; | ||
292 | } | ||
293 | |||
294 | block = minix_blocks_needed( | ||
295 | (sbi->s_nzones - (sbi->s_firstdatazone + 1)), | ||
296 | s->s_blocksize); | ||
297 | if (sbi->s_zmap_blocks < block) { | ||
298 | printk("MINIX-fs: file system does not have enough " | ||
299 | "zmap blocks allocated. Refusing to mount.\n"); | ||
300 | goto out_iput; | ||
301 | } | ||
302 | |||
282 | return 0; | 303 | return 0; |
283 | 304 | ||
284 | out_iput: | 305 | out_iput: |
@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
339 | buf->f_type = sb->s_magic; | 360 | buf->f_type = sb->s_magic; |
340 | buf->f_bsize = sb->s_blocksize; | 361 | buf->f_bsize = sb->s_blocksize; |
341 | buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; | 362 | buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; |
342 | buf->f_bfree = minix_count_free_blocks(sbi); | 363 | buf->f_bfree = minix_count_free_blocks(sb); |
343 | buf->f_bavail = buf->f_bfree; | 364 | buf->f_bavail = buf->f_bfree; |
344 | buf->f_files = sbi->s_ninodes; | 365 | buf->f_files = sbi->s_ninodes; |
345 | buf->f_ffree = minix_count_free_inodes(sbi); | 366 | buf->f_ffree = minix_count_free_inodes(sb); |
346 | buf->f_namelen = sbi->s_namelen; | 367 | buf->f_namelen = sbi->s_namelen; |
347 | buf->f_fsid.val[0] = (u32)id; | 368 | buf->f_fsid.val[0] = (u32)id; |
348 | buf->f_fsid.val[1] = (u32)(id >> 32); | 369 | buf->f_fsid.val[1] = (u32)(id >> 32); |
diff --git a/fs/minix/minix.h b/fs/minix/minix.h index 341e2122879a..26bbd55e82ea 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h | |||
@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru | |||
48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 48 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); | 49 | extern struct inode * minix_new_inode(const struct inode *, int, int *); |
50 | extern void minix_free_inode(struct inode * inode); | 50 | extern void minix_free_inode(struct inode * inode); |
51 | extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi); | 51 | extern unsigned long minix_count_free_inodes(struct super_block *sb); |
52 | extern int minix_new_block(struct inode * inode); | 52 | extern int minix_new_block(struct inode * inode); |
53 | extern void minix_free_block(struct inode *inode, unsigned long block); | 53 | extern void minix_free_block(struct inode *inode, unsigned long block); |
54 | extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi); | 54 | extern unsigned long minix_count_free_blocks(struct super_block *sb); |
55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 55 | extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); | 56 | extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); |
57 | 57 | ||
@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode) | |||
88 | return list_entry(inode, struct minix_inode_info, vfs_inode); | 88 | return list_entry(inode, struct minix_inode_info, vfs_inode); |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize) | ||
92 | { | ||
93 | return DIV_ROUND_UP(bits, blocksize * 8); | ||
94 | } | ||
95 | |||
91 | #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ | 96 | #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \ |
92 | defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) | 97 | defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED) |
93 | 98 | ||
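minix_blocks_needed() is a plain ceiling division by the per-block bit capacity; the mount-time checks added in fs/minix/inode.c compare the on-disk bitmap block counts against it. A worked example, assuming 1024-byte blocks:

	/* 1024-byte blocks hold 1024 * 8 = 8192 bits each */
	minix_blocks_needed(30000, 1024)
		== DIV_ROUND_UP(30000, 8192)
		== (30000 + 8191) / 8192
		== 4;	/* 3 blocks (24576 bits) would be too few */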
@@ -125,7 +130,7 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | |||
125 | if (!size) | 130 | if (!size) |
126 | return 0; | 131 | return 0; |
127 | 132 | ||
128 | size = (size >> 4) + ((size & 15) > 0); | 133 | size >>= 4; |
129 | while (*p++ == 0xffff) { | 134 | while (*p++ == 0xffff) { |
130 | if (--size == 0) | 135 | if (--size == 0) |
131 | return (p - addr) << 4; | 136 | return (p - addr) << 4; |
diff --git a/fs/namespace.c b/fs/namespace.c index e5e1c7d1839b..cfc6d4448aa5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v) | |||
1048 | if (err) | 1048 | if (err) |
1049 | goto out; | 1049 | goto out; |
1050 | seq_putc(m, ' '); | 1050 | seq_putc(m, ' '); |
1051 | seq_path_root(m, &mnt_path, &root, " \t\n\\"); | 1051 | |
1052 | if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) { | 1052 | /* mountpoints outside of chroot jail will give SEQ_SKIP on this */ |
1053 | /* | 1053 | err = seq_path_root(m, &mnt_path, &root, " \t\n\\"); |
1054 | * Mountpoint is outside root, discard that one. Ugly, | 1054 | if (err) |
1055 | * but less so than trying to do that in iterator in a | 1055 | goto out; |
1056 | * race-free way (due to renames). | 1056 | |
1057 | */ | ||
1058 | return SEQ_SKIP; | ||
1059 | } | ||
1060 | seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); | 1057 | seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); |
1061 | show_mnt_opts(m, mnt); | 1058 | show_mnt_opts(m, mnt); |
1062 | 1059 | ||
@@ -2483,11 +2480,43 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) | |||
2483 | __mnt_make_longterm(mnt); | 2480 | __mnt_make_longterm(mnt); |
2484 | new_ns->root = mnt; | 2481 | new_ns->root = mnt; |
2485 | list_add(&new_ns->list, &new_ns->root->mnt_list); | 2482 | list_add(&new_ns->list, &new_ns->root->mnt_list); |
2483 | } else { | ||
2484 | mntput(mnt); | ||
2486 | } | 2485 | } |
2487 | return new_ns; | 2486 | return new_ns; |
2488 | } | 2487 | } |
2489 | EXPORT_SYMBOL(create_mnt_ns); | 2488 | EXPORT_SYMBOL(create_mnt_ns); |
2490 | 2489 | ||
2490 | struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) | ||
2491 | { | ||
2492 | struct mnt_namespace *ns; | ||
2493 | struct super_block *s; | ||
2494 | struct path path; | ||
2495 | int err; | ||
2496 | |||
2497 | ns = create_mnt_ns(mnt); | ||
2498 | if (IS_ERR(ns)) | ||
2499 | return ERR_CAST(ns); | ||
2500 | |||
2501 | err = vfs_path_lookup(mnt->mnt_root, mnt, | ||
2502 | name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); | ||
2503 | |||
2504 | put_mnt_ns(ns); | ||
2505 | |||
2506 | if (err) | ||
2507 | return ERR_PTR(err); | ||
2508 | |||
2509 | /* trade a vfsmount reference for active sb one */ | ||
2510 | s = path.mnt->mnt_sb; | ||
2511 | atomic_inc(&s->s_active); | ||
2512 | mntput(path.mnt); | ||
2513 | /* lock the sucker */ | ||
2514 | down_write(&s->s_umount); | ||
2515 | /* ... and return the root of (sub)tree on it */ | ||
2516 | return path.dentry; | ||
2517 | } | ||
2518 | EXPORT_SYMBOL(mount_subtree); | ||
2519 | |||
2491 | SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, | 2520 | SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, |
2492 | char __user *, type, unsigned long, flags, void __user *, data) | 2521 | char __user *, type, unsigned long, flags, void __user *, data) |
2493 | { | 2522 | { |
@@ -2744,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt) | |||
2744 | } | 2773 | } |
2745 | } | 2774 | } |
2746 | EXPORT_SYMBOL(kern_unmount); | 2775 | EXPORT_SYMBOL(kern_unmount); |
2776 | |||
2777 | bool our_mnt(struct vfsmount *mnt) | ||
2778 | { | ||
2779 | return check_mnt(mnt); | ||
2780 | } | ||
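mount_subtree() packages a sequence several in-kernel callers used to open-code: wrap a helper vfsmount in a private mount namespace, walk to a path inside it, then trade the vfsmount reference for an s_active reference so only the subtree's dentry and superblock stay pinned. A hedged sketch of a caller follows; mount_and_descend() and its parameters are illustrative, not part of this patch, and note that mount_subtree() consumes the vfsmount in all cases, including errors:

static struct dentry *mount_and_descend(struct file_system_type *fstype,
					int flags, void *data,
					const char *subpath)
{
	struct vfsmount *mnt = vfs_kern_mount(fstype, flags, fstype->name, data);

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);
	/* on success: subpath's dentry, with s_umount held and s_active
	 * raised; on failure the mount has already been dropped for us */
	return mount_subtree(mnt, subpath);
}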
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index b238d95ac48c..ac2899098147 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1468,12 +1468,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry | |||
1468 | res = NULL; | 1468 | res = NULL; |
1469 | goto out; | 1469 | goto out; |
1470 | /* This turned out not to be a regular file */ | 1470 | /* This turned out not to be a regular file */ |
1471 | case -EISDIR: | ||
1471 | case -ENOTDIR: | 1472 | case -ENOTDIR: |
1472 | goto no_open; | 1473 | goto no_open; |
1473 | case -ELOOP: | 1474 | case -ELOOP: |
1474 | if (!(nd->intent.open.flags & O_NOFOLLOW)) | 1475 | if (!(nd->intent.open.flags & O_NOFOLLOW)) |
1475 | goto no_open; | 1476 | goto no_open; |
1476 | /* case -EISDIR: */ | ||
1477 | /* case -EINVAL: */ | 1477 | /* case -EINVAL: */ |
1478 | default: | 1478 | default: |
1479 | res = ERR_CAST(inode); | 1479 | res = ERR_CAST(inode); |
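Listing -EISDIR beside -ENOTDIR means an atomic open that resolves to a directory now falls back to the regular no_open lookup path, letting the VFS apply its usual directory-open semantics (plain reads succeed, creates and writes fail with the proper error) instead of failing the whole lookup. A minimal user-space check, assuming an NFS-mounted directory at a placeholder path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "/mnt/nfs/somedir" is a placeholder, not a path from this patch */
	int fd = open("/mnt/nfs/somedir", O_RDONLY);

	if (fd < 0)
		perror("open");
	else {
		puts("directory open fell back to the normal lookup path");
		close(fd);
	}
	return 0;
}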
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 0a1f8312b4dc..eca56d4b39c0 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -40,48 +40,8 @@ | |||
40 | 40 | ||
41 | #define NFSDBG_FACILITY NFSDBG_FILE | 41 | #define NFSDBG_FACILITY NFSDBG_FILE |
42 | 42 | ||
43 | static int nfs_file_open(struct inode *, struct file *); | ||
44 | static int nfs_file_release(struct inode *, struct file *); | ||
45 | static loff_t nfs_file_llseek(struct file *file, loff_t offset, int origin); | ||
46 | static int nfs_file_mmap(struct file *, struct vm_area_struct *); | ||
47 | static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos, | ||
48 | struct pipe_inode_info *pipe, | ||
49 | size_t count, unsigned int flags); | ||
50 | static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov, | ||
51 | unsigned long nr_segs, loff_t pos); | ||
52 | static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, | ||
53 | struct file *filp, loff_t *ppos, | ||
54 | size_t count, unsigned int flags); | ||
55 | static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, | ||
56 | unsigned long nr_segs, loff_t pos); | ||
57 | static int nfs_file_flush(struct file *, fl_owner_t id); | ||
58 | static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync); | ||
59 | static int nfs_check_flags(int flags); | ||
60 | static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); | ||
61 | static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); | ||
62 | static int nfs_setlease(struct file *file, long arg, struct file_lock **fl); | ||
63 | |||
64 | static const struct vm_operations_struct nfs_file_vm_ops; | 43 | static const struct vm_operations_struct nfs_file_vm_ops; |
65 | 44 | ||
66 | const struct file_operations nfs_file_operations = { | ||
67 | .llseek = nfs_file_llseek, | ||
68 | .read = do_sync_read, | ||
69 | .write = do_sync_write, | ||
70 | .aio_read = nfs_file_read, | ||
71 | .aio_write = nfs_file_write, | ||
72 | .mmap = nfs_file_mmap, | ||
73 | .open = nfs_file_open, | ||
74 | .flush = nfs_file_flush, | ||
75 | .release = nfs_file_release, | ||
76 | .fsync = nfs_file_fsync, | ||
77 | .lock = nfs_lock, | ||
78 | .flock = nfs_flock, | ||
79 | .splice_read = nfs_file_splice_read, | ||
80 | .splice_write = nfs_file_splice_write, | ||
81 | .check_flags = nfs_check_flags, | ||
82 | .setlease = nfs_setlease, | ||
83 | }; | ||
84 | |||
85 | const struct inode_operations nfs_file_inode_operations = { | 45 | const struct inode_operations nfs_file_inode_operations = { |
86 | .permission = nfs_permission, | 46 | .permission = nfs_permission, |
87 | .getattr = nfs_getattr, | 47 | .getattr = nfs_getattr, |
@@ -886,3 +846,54 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl) | |||
886 | file->f_path.dentry->d_name.name, arg); | 846 | file->f_path.dentry->d_name.name, arg); |
887 | return -EINVAL; | 847 | return -EINVAL; |
888 | } | 848 | } |
849 | |||
850 | const struct file_operations nfs_file_operations = { | ||
851 | .llseek = nfs_file_llseek, | ||
852 | .read = do_sync_read, | ||
853 | .write = do_sync_write, | ||
854 | .aio_read = nfs_file_read, | ||
855 | .aio_write = nfs_file_write, | ||
856 | .mmap = nfs_file_mmap, | ||
857 | .open = nfs_file_open, | ||
858 | .flush = nfs_file_flush, | ||
859 | .release = nfs_file_release, | ||
860 | .fsync = nfs_file_fsync, | ||
861 | .lock = nfs_lock, | ||
862 | .flock = nfs_flock, | ||
863 | .splice_read = nfs_file_splice_read, | ||
864 | .splice_write = nfs_file_splice_write, | ||
865 | .check_flags = nfs_check_flags, | ||
866 | .setlease = nfs_setlease, | ||
867 | }; | ||
868 | |||
869 | #ifdef CONFIG_NFS_V4 | ||
870 | static int | ||
871 | nfs4_file_open(struct inode *inode, struct file *filp) | ||
872 | { | ||
873 | /* | ||
874 | * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to | ||
875 | * this point, then something is very wrong | ||
876 | */ | ||
877 | dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp); | ||
878 | return -ENOTDIR; | ||
879 | } | ||
880 | |||
881 | const struct file_operations nfs4_file_operations = { | ||
882 | .llseek = nfs_file_llseek, | ||
883 | .read = do_sync_read, | ||
884 | .write = do_sync_write, | ||
885 | .aio_read = nfs_file_read, | ||
886 | .aio_write = nfs_file_write, | ||
887 | .mmap = nfs_file_mmap, | ||
888 | .open = nfs4_file_open, | ||
889 | .flush = nfs_file_flush, | ||
890 | .release = nfs_file_release, | ||
891 | .fsync = nfs_file_fsync, | ||
892 | .lock = nfs_lock, | ||
893 | .flock = nfs_flock, | ||
894 | .splice_read = nfs_file_splice_read, | ||
895 | .splice_write = nfs_file_splice_write, | ||
896 | .check_flags = nfs_check_flags, | ||
897 | .setlease = nfs_setlease, | ||
898 | }; | ||
899 | #endif /* CONFIG_NFS_V4 */ | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index c07a55aec838..50a15fa8cf98 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) | |||
291 | */ | 291 | */ |
292 | inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; | 292 | inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops; |
293 | if (S_ISREG(inode->i_mode)) { | 293 | if (S_ISREG(inode->i_mode)) { |
294 | inode->i_fop = &nfs_file_operations; | 294 | inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops; |
295 | inode->i_data.a_ops = &nfs_file_aops; | 295 | inode->i_data.a_ops = &nfs_file_aops; |
296 | inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; | 296 | inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; |
297 | } else if (S_ISDIR(inode->i_mode)) { | 297 | } else if (S_ISDIR(inode->i_mode)) { |
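This one-line change in nfs_fhget() is completed by the nfs3proc.c, nfs4proc.c and proc.c hunks below: each struct nfs_rpc_ops gains a .file_ops pointer, so regular-file inodes pick nfs_file_operations (v2/v3) or nfs4_file_operations (v4) without an #ifdef at the inode-setup site. A heavily abbreviated, kernel-flavored sketch of the dispatch shape; the real nfs_rpc_ops carries many more methods:

/* abbreviated sketch of the per-version operations vector */
struct nfs_rpc_ops_sketch {
	const struct inode_operations	*file_inode_ops;
	const struct file_operations	*file_ops;	/* new in this series */
};

static void init_file_inode(struct inode *inode,
			    const struct nfs_rpc_ops_sketch *ops)
{
	inode->i_op = ops->file_inode_ops;
	inode->i_fop = ops->file_ops;	/* was hard-coded to &nfs_file_operations */
}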
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index c1a1bd8ddf1c..3f4d95751d52 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -299,6 +299,8 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata); | |||
299 | extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, | 299 | extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, |
300 | struct list_head *head); | 300 | struct list_head *head); |
301 | 301 | ||
302 | extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, | ||
303 | struct inode *inode); | ||
302 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); | 304 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); |
303 | extern void nfs_readdata_release(struct nfs_read_data *rdata); | 305 | extern void nfs_readdata_release(struct nfs_read_data *rdata); |
304 | 306 | ||
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 85f1690ca08c..d4bc9ed91748 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -853,6 +853,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = { | |||
853 | .dentry_ops = &nfs_dentry_operations, | 853 | .dentry_ops = &nfs_dentry_operations, |
854 | .dir_inode_ops = &nfs3_dir_inode_operations, | 854 | .dir_inode_ops = &nfs3_dir_inode_operations, |
855 | .file_inode_ops = &nfs3_file_inode_operations, | 855 | .file_inode_ops = &nfs3_file_inode_operations, |
856 | .file_ops = &nfs_file_operations, | ||
856 | .getroot = nfs3_proc_get_root, | 857 | .getroot = nfs3_proc_get_root, |
857 | .getattr = nfs3_proc_getattr, | 858 | .getattr = nfs3_proc_getattr, |
858 | .setattr = nfs3_proc_setattr, | 859 | .setattr = nfs3_proc_setattr, |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b60fddf606f7..be2bbac13817 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2464,8 +2464,7 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst | |||
2464 | case -NFS4ERR_BADNAME: | 2464 | case -NFS4ERR_BADNAME: |
2465 | return -ENOENT; | 2465 | return -ENOENT; |
2466 | case -NFS4ERR_MOVED: | 2466 | case -NFS4ERR_MOVED: |
2467 | err = nfs4_get_referral(dir, name, fattr, fhandle); | 2467 | return nfs4_get_referral(dir, name, fattr, fhandle); |
2468 | break; | ||
2469 | case -NFS4ERR_WRONGSEC: | 2468 | case -NFS4ERR_WRONGSEC: |
2470 | nfs_fixup_secinfo_attributes(fattr, fhandle); | 2469 | nfs_fixup_secinfo_attributes(fattr, fhandle); |
2471 | } | 2470 | } |
@@ -6253,6 +6252,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = { | |||
6253 | .dentry_ops = &nfs4_dentry_operations, | 6252 | .dentry_ops = &nfs4_dentry_operations, |
6254 | .dir_inode_ops = &nfs4_dir_inode_operations, | 6253 | .dir_inode_ops = &nfs4_dir_inode_operations, |
6255 | .file_inode_ops = &nfs4_file_inode_operations, | 6254 | .file_inode_ops = &nfs4_file_inode_operations, |
6255 | .file_ops = &nfs4_file_operations, | ||
6256 | .getroot = nfs4_proc_get_root, | 6256 | .getroot = nfs4_proc_get_root, |
6257 | .getattr = nfs4_proc_getattr, | 6257 | .getattr = nfs4_proc_getattr, |
6258 | .setattr = nfs4_proc_setattr, | 6258 | .setattr = nfs4_proc_setattr, |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index baf73536bc04..8e672a2b2d69 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -1260,6 +1260,25 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) | |||
1260 | } | 1260 | } |
1261 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); | 1261 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); |
1262 | 1262 | ||
1263 | static void pnfs_ld_handle_read_error(struct nfs_read_data *data) | ||
1264 | { | ||
1265 | struct nfs_pageio_descriptor pgio; | ||
1266 | |||
1267 | put_lseg(data->lseg); | ||
1268 | data->lseg = NULL; | ||
1269 | dprintk("pnfs read error = %d\n", data->pnfs_error); | ||
1270 | |||
1271 | nfs_pageio_init_read_mds(&pgio, data->inode); | ||
1272 | |||
1273 | while (!list_empty(&data->pages)) { | ||
1274 | struct nfs_page *req = nfs_list_entry(data->pages.next); | ||
1275 | |||
1276 | nfs_list_remove_request(req); | ||
1277 | nfs_pageio_add_request(&pgio, req); | ||
1278 | } | ||
1279 | nfs_pageio_complete(&pgio); | ||
1280 | } | ||
1281 | |||
1263 | /* | 1282 | /* |
1264 | * Called by non rpc-based layout drivers | 1283 | * Called by non rpc-based layout drivers |
1265 | */ | 1284 | */ |
@@ -1268,11 +1287,8 @@ void pnfs_ld_read_done(struct nfs_read_data *data) | |||
1268 | if (likely(!data->pnfs_error)) { | 1287 | if (likely(!data->pnfs_error)) { |
1269 | __nfs4_read_done_cb(data); | 1288 | __nfs4_read_done_cb(data); |
1270 | data->mds_ops->rpc_call_done(&data->task, data); | 1289 | data->mds_ops->rpc_call_done(&data->task, data); |
1271 | } else { | 1290 | } else |
1272 | put_lseg(data->lseg); | 1291 | pnfs_ld_handle_read_error(data); |
1273 | data->lseg = NULL; | ||
1274 | dprintk("pnfs write error = %d\n", data->pnfs_error); | ||
1275 | } | ||
1276 | data->mds_ops->rpc_release(data); | 1292 | data->mds_ops->rpc_release(data); |
1277 | } | 1293 | } |
1278 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); | 1294 | EXPORT_SYMBOL_GPL(pnfs_ld_read_done); |
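pnfs_ld_handle_read_error() centralizes the fallback that nfs_readpage_release_full() used to improvise (see the fs/nfs/read.c hunk below): drop the layout segment, then drain the failed I/O's request list into a freshly initialized MDS pageio descriptor so the read is retried through the metadata server. A standalone sketch of the drain-and-resubmit loop; the list and submit helpers are local stand-ins for the nfs_list_*/nfs_pageio_* calls:

#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int page_index;
};

static void mds_submit(struct req *r)
{
	printf("resubmitting page %d through the MDS\n", r->page_index);
}

/* drain the request list head-first into the fallback submit path */
static void handle_read_error(struct req **pages)
{
	while (*pages) {
		struct req *r = *pages;

		*pages = r->next;	/* nfs_list_remove_request() analogue */
		mds_submit(r);		/* nfs_pageio_add_request() analogue */
		free(r);
	}
	/* nfs_pageio_complete() would flush the coalesced requests here */
}

int main(void)
{
	struct req *list = NULL;

	for (int i = 2; i >= 0; i--) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			break;
		r->page_index = i;
		r->next = list;
		list = r;
	}
	handle_read_error(&list);
	return 0;
}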
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index ac40b8535d7e..f48125da198a 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -710,6 +710,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { | |||
710 | .dentry_ops = &nfs_dentry_operations, | 710 | .dentry_ops = &nfs_dentry_operations, |
711 | .dir_inode_ops = &nfs_dir_inode_operations, | 711 | .dir_inode_ops = &nfs_dir_inode_operations, |
712 | .file_inode_ops = &nfs_file_inode_operations, | 712 | .file_inode_ops = &nfs_file_inode_operations, |
713 | .file_ops = &nfs_file_operations, | ||
713 | .getroot = nfs_proc_get_root, | 714 | .getroot = nfs_proc_get_root, |
714 | .getattr = nfs_proc_getattr, | 715 | .getattr = nfs_proc_getattr, |
715 | .setattr = nfs_proc_setattr, | 716 | .setattr = nfs_proc_setattr, |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 8b48ec63f722..cfa175c223dc 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -109,7 +109,7 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
112 | static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, | 112 | void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, |
113 | struct inode *inode) | 113 | struct inode *inode) |
114 | { | 114 | { |
115 | nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, | 115 | nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, |
@@ -534,23 +534,13 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata) | |||
534 | static void nfs_readpage_release_full(void *calldata) | 534 | static void nfs_readpage_release_full(void *calldata) |
535 | { | 535 | { |
536 | struct nfs_read_data *data = calldata; | 536 | struct nfs_read_data *data = calldata; |
537 | struct nfs_pageio_descriptor pgio; | ||
538 | 537 | ||
539 | if (data->pnfs_error) { | ||
540 | nfs_pageio_init_read_mds(&pgio, data->inode); | ||
541 | pgio.pg_recoalesce = 1; | ||
542 | } | ||
543 | while (!list_empty(&data->pages)) { | 538 | while (!list_empty(&data->pages)) { |
544 | struct nfs_page *req = nfs_list_entry(data->pages.next); | 539 | struct nfs_page *req = nfs_list_entry(data->pages.next); |
545 | 540 | ||
546 | nfs_list_remove_request(req); | 541 | nfs_list_remove_request(req); |
547 | if (!data->pnfs_error) | 542 | nfs_readpage_release(req); |
548 | nfs_readpage_release(req); | ||
549 | else | ||
550 | nfs_pageio_add_request(&pgio, req); | ||
551 | } | 543 | } |
552 | if (data->pnfs_error) | ||
553 | nfs_pageio_complete(&pgio); | ||
554 | nfs_readdata_release(calldata); | 544 | nfs_readdata_release(calldata); |
555 | } | 545 | } |
556 | 546 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 480b3b6bf71e..134777406ee3 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -2787,43 +2787,18 @@ static void nfs_referral_loop_unprotect(void) | |||
2787 | static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, | 2787 | static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, |
2788 | const char *export_path) | 2788 | const char *export_path) |
2789 | { | 2789 | { |
2790 | struct mnt_namespace *ns_private; | ||
2791 | struct super_block *s; | ||
2792 | struct dentry *dentry; | 2790 | struct dentry *dentry; |
2793 | struct path path; | 2791 | int ret = nfs_referral_loop_protect(); |
2794 | int ret; | ||
2795 | |||
2796 | ns_private = create_mnt_ns(root_mnt); | ||
2797 | ret = PTR_ERR(ns_private); | ||
2798 | if (IS_ERR(ns_private)) | ||
2799 | goto out_mntput; | ||
2800 | |||
2801 | ret = nfs_referral_loop_protect(); | ||
2802 | if (ret != 0) | ||
2803 | goto out_put_mnt_ns; | ||
2804 | 2792 | ||
2805 | ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, | 2793 | if (ret) { |
2806 | export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); | 2794 | mntput(root_mnt); |
2795 | return ERR_PTR(ret); | ||
2796 | } | ||
2807 | 2797 | ||
2798 | dentry = mount_subtree(root_mnt, export_path); | ||
2808 | nfs_referral_loop_unprotect(); | 2799 | nfs_referral_loop_unprotect(); |
2809 | put_mnt_ns(ns_private); | ||
2810 | |||
2811 | if (ret != 0) | ||
2812 | goto out_err; | ||
2813 | |||
2814 | s = path.mnt->mnt_sb; | ||
2815 | atomic_inc(&s->s_active); | ||
2816 | dentry = dget(path.dentry); | ||
2817 | 2800 | ||
2818 | path_put(&path); | ||
2819 | down_write(&s->s_umount); | ||
2820 | return dentry; | 2801 | return dentry; |
2821 | out_put_mnt_ns: | ||
2822 | put_mnt_ns(ns_private); | ||
2823 | out_mntput: | ||
2824 | mntput(root_mnt); | ||
2825 | out_err: | ||
2826 | return ERR_PTR(ret); | ||
2827 | } | 2802 | } |
2828 | 2803 | ||
2829 | static struct dentry *nfs4_try_mount(int flags, const char *dev_name, | 2804 | static struct dentry *nfs4_try_mount(int flags, const char *dev_name, |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index ed553c60de82..3165aebb43c8 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode, | |||
5699 | OCFS2_JOURNAL_ACCESS_WRITE); | 5699 | OCFS2_JOURNAL_ACCESS_WRITE); |
5700 | if (ret) { | 5700 | if (ret) { |
5701 | mlog_errno(ret); | 5701 | mlog_errno(ret); |
5702 | goto out; | 5702 | goto out_commit; |
5703 | } | 5703 | } |
5704 | 5704 | ||
5705 | dquot_free_space_nodirty(inode, | 5705 | dquot_free_space_nodirty(inode, |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c1efe939c774..78b68af3b0e3 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page) | |||
290 | } | 290 | } |
291 | 291 | ||
292 | if (down_read_trylock(&oi->ip_alloc_sem) == 0) { | 292 | if (down_read_trylock(&oi->ip_alloc_sem) == 0) { |
293 | /* | ||
294 | * Unlock the page and cycle ip_alloc_sem so that we don't | ||
295 | * busyloop waiting for ip_alloc_sem to unlock | ||
296 | */ | ||
293 | ret = AOP_TRUNCATED_PAGE; | 297 | ret = AOP_TRUNCATED_PAGE; |
298 | unlock_page(page); | ||
299 | unlock = 0; | ||
300 | down_read(&oi->ip_alloc_sem); | ||
301 | up_read(&oi->ip_alloc_sem); | ||
294 | goto out_inode_unlock; | 302 | goto out_inode_unlock; |
295 | } | 303 | } |
296 | 304 | ||
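The down_read()/up_read() pair added above is deliberately "useless": after the failed trylock, cycling ip_alloc_sem once puts the reader to sleep until the current holder finishes, so returning AOP_TRUNCATED_PAGE does not degenerate into a tight readpage retry loop. The same idiom in a standalone pthread sketch, with a plain rwlock standing in for ip_alloc_sem:

#include <pthread.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

/* returns nonzero when the caller should retry the whole operation */
static int read_page_sketch(void)
{
	if (pthread_rwlock_tryrdlock(&alloc_sem) != 0) {
		/* cycle the lock: block until the holder is done, then
		 * release at once -- the retry happens at a higher level */
		pthread_rwlock_rdlock(&alloc_sem);
		pthread_rwlock_unlock(&alloc_sem);
		return 1;	/* AOP_TRUNCATED_PAGE analogue */
	}
	/* ... perform the read with alloc_sem held for reading ... */
	pthread_rwlock_unlock(&alloc_sem);
	return 0;
}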
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
563 | { | 571 | { |
564 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; | 572 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; |
565 | int level; | 573 | int level; |
574 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
566 | 575 | ||
567 | /* this io's submitter should not have unlocked this before we could */ | 576 | /* this io's submitter should not have unlocked this before we could */ |
568 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); | 577 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); |
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
570 | if (ocfs2_iocb_is_sem_locked(iocb)) | 579 | if (ocfs2_iocb_is_sem_locked(iocb)) |
571 | ocfs2_iocb_clear_sem_locked(iocb); | 580 | ocfs2_iocb_clear_sem_locked(iocb); |
572 | 581 | ||
582 | if (ocfs2_iocb_is_unaligned_aio(iocb)) { | ||
583 | ocfs2_iocb_clear_unaligned_aio(iocb); | ||
584 | |||
585 | if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) && | ||
586 | waitqueue_active(wq)) { | ||
587 | wake_up_all(wq); | ||
588 | } | ||
589 | } | ||
590 | |||
573 | ocfs2_iocb_clear_rw_locked(iocb); | 591 | ocfs2_iocb_clear_rw_locked(iocb); |
574 | 592 | ||
575 | level = ocfs2_iocb_rw_locked_level(iocb); | 593 | level = ocfs2_iocb_rw_locked_level(iocb); |
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt { | |||
863 | struct page *w_target_page; | 881 | struct page *w_target_page; |
864 | 882 | ||
865 | /* | 883 | /* |
884 | * w_target_locked is used in the page_mkwrite path to indicate that | ||
885 | * w_target_page must not be unlocked in ocfs2_write_end_nolock. | ||
886 | */ | ||
887 | unsigned int w_target_locked:1; | ||
888 | |||
889 | /* | ||
866 | * ocfs2_write_end() uses this to know what the real range to | 890 | * ocfs2_write_end() uses this to know what the real range to |
867 | * write in the target should be. | 891 | * write in the target should be. |
868 | */ | 892 | */ |
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) | |||
895 | 919 | ||
896 | static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) | 920 | static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) |
897 | { | 921 | { |
922 | int i; | ||
923 | |||
924 | /* | ||
925 | * w_target_locked is only set to true in the page_mkwrite() case. | ||
926 | * The intent is to allow us to lock the target page from write_begin() | ||
927 | * to write_end(). The caller must hold a ref on w_target_page. | ||
928 | */ | ||
929 | if (wc->w_target_locked) { | ||
930 | BUG_ON(!wc->w_target_page); | ||
931 | for (i = 0; i < wc->w_num_pages; i++) { | ||
932 | if (wc->w_target_page == wc->w_pages[i]) { | ||
933 | wc->w_pages[i] = NULL; | ||
934 | break; | ||
935 | } | ||
936 | } | ||
937 | mark_page_accessed(wc->w_target_page); | ||
938 | page_cache_release(wc->w_target_page); | ||
939 | } | ||
898 | ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); | 940 | ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); |
899 | 941 | ||
900 | brelse(wc->w_di_bh); | 942 | brelse(wc->w_di_bh); |
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, | |||
1132 | */ | 1174 | */ |
1133 | lock_page(mmap_page); | 1175 | lock_page(mmap_page); |
1134 | 1176 | ||
1177 | /* Exit and let the caller retry */ | ||
1135 | if (mmap_page->mapping != mapping) { | 1178 | if (mmap_page->mapping != mapping) { |
1179 | WARN_ON(mmap_page->mapping); | ||
1136 | unlock_page(mmap_page); | 1180 | unlock_page(mmap_page); |
1137 | /* | 1181 | ret = -EAGAIN; |
1138 | * Sanity check - the locking in | ||
1139 | * ocfs2_pagemkwrite() should ensure | ||
1140 | * that this code doesn't trigger. | ||
1141 | */ | ||
1142 | ret = -EINVAL; | ||
1143 | mlog_errno(ret); | ||
1144 | goto out; | 1182 | goto out; |
1145 | } | 1183 | } |
1146 | 1184 | ||
1147 | page_cache_get(mmap_page); | 1185 | page_cache_get(mmap_page); |
1148 | wc->w_pages[i] = mmap_page; | 1186 | wc->w_pages[i] = mmap_page; |
1187 | wc->w_target_locked = true; | ||
1149 | } else { | 1188 | } else { |
1150 | wc->w_pages[i] = find_or_create_page(mapping, index, | 1189 | wc->w_pages[i] = find_or_create_page(mapping, index, |
1151 | GFP_NOFS); | 1190 | GFP_NOFS); |
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, | |||
1160 | wc->w_target_page = wc->w_pages[i]; | 1199 | wc->w_target_page = wc->w_pages[i]; |
1161 | } | 1200 | } |
1162 | out: | 1201 | out: |
1202 | if (ret) | ||
1203 | wc->w_target_locked = false; | ||
1163 | return ret; | 1204 | return ret; |
1164 | } | 1205 | } |
1165 | 1206 | ||
@@ -1817,11 +1858,23 @@ try_again: | |||
1817 | */ | 1858 | */ |
1818 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, | 1859 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, |
1819 | cluster_of_pages, mmap_page); | 1860 | cluster_of_pages, mmap_page); |
1820 | if (ret) { | 1861 | if (ret && ret != -EAGAIN) { |
1821 | mlog_errno(ret); | 1862 | mlog_errno(ret); |
1822 | goto out_quota; | 1863 | goto out_quota; |
1823 | } | 1864 | } |
1824 | 1865 | ||
1866 | /* | ||
1867 | * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock | ||
1868 | * the target page. In this case, we exit with no error and no target | ||
1869 | * page. This will trigger the caller, page_mkwrite(), to re-try | ||
1870 | * the operation. | ||
1871 | */ | ||
1872 | if (ret == -EAGAIN) { | ||
1873 | BUG_ON(wc->w_target_page); | ||
1874 | ret = 0; | ||
1875 | goto out_quota; | ||
1876 | } | ||
1877 | |||
1825 | ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, | 1878 | ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, |
1826 | len); | 1879 | len); |
1827 | if (ret) { | 1880 | if (ret) { |
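Taken together, the aops.c hunks above replace a "should never happen" -EINVAL with a retry protocol: if the mmap'd target page changed mapping while we waited for its lock, ocfs2_grab_pages_for_write() returns -EAGAIN, write_begin turns that into success with no target page, and page_mkwrite() re-faults. A condensed sketch of the contract; the wctx structure and helper are stand-ins for the ocfs2 originals:

#include <errno.h>

struct wctx {
	void *target_page;
};

extern int grab_pages_for_write(struct wctx *wc);	/* may return -EAGAIN */

static int write_begin_sketch(struct wctx *wc)
{
	int ret = grab_pages_for_write(wc);

	if (ret && ret != -EAGAIN)
		return ret;	/* a real error */
	if (ret == -EAGAIN)
		return 0;	/* no error, no target page: the caller's
				 * page_mkwrite() path will retry the fault */
	/* ... proceed with the write; wc->target_page is locked ... */
	return 0;
}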
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 75cf3ad987a6..ffb2da370a99 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h | |||
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits { | |||
78 | OCFS2_IOCB_RW_LOCK = 0, | 78 | OCFS2_IOCB_RW_LOCK = 0, |
79 | OCFS2_IOCB_RW_LOCK_LEVEL, | 79 | OCFS2_IOCB_RW_LOCK_LEVEL, |
80 | OCFS2_IOCB_SEM, | 80 | OCFS2_IOCB_SEM, |
81 | OCFS2_IOCB_UNALIGNED_IO, | ||
81 | OCFS2_IOCB_NUM_LOCKS | 82 | OCFS2_IOCB_NUM_LOCKS |
82 | }; | 83 | }; |
83 | 84 | ||
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits { | |||
91 | clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) | 92 | clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) |
92 | #define ocfs2_iocb_is_sem_locked(iocb) \ | 93 | #define ocfs2_iocb_is_sem_locked(iocb) \ |
93 | test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) | 94 | test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private) |
95 | |||
96 | #define ocfs2_iocb_set_unaligned_aio(iocb) \ | ||
97 | set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
98 | #define ocfs2_iocb_clear_unaligned_aio(iocb) \ | ||
99 | clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
100 | #define ocfs2_iocb_is_unaligned_aio(iocb) \ | ||
101 | test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private) | ||
102 | |||
103 | #define OCFS2_IOEND_WQ_HASH_SZ 37 | ||
104 | #define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\ | ||
105 | OCFS2_IOEND_WQ_HASH_SZ]) | ||
106 | extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
107 | |||
94 | #endif /* OCFS2_FILE_H */ | 108 | #endif /* OCFS2_FILE_H */ |
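ocfs2_ioend_wq() avoids storing a wait queue in every inode: the inode pointer is hashed into a fixed table of 37 queues (a prime, to spread the low pointer bits), trading occasional false wakeups between inodes that share a slot for zero per-inode space. The hashing idiom in a standalone sketch:

#include <stdio.h>

#define WQ_HASH_SZ 37	/* prime, as in OCFS2_IOEND_WQ_HASH_SZ */

struct waitq { int unused; };
static struct waitq ioend_wq[WQ_HASH_SZ];

/* hash an object's address into the shared wait-queue table */
static struct waitq *ioend_wq_for(const void *obj)
{
	return &ioend_wq[(unsigned long)obj % WQ_HASH_SZ];
}

int main(void)
{
	int a, b;

	printf("a -> slot %td, b -> slot %td\n",
	       ioend_wq_for(&a) - ioend_wq, ioend_wq_for(&b) - ioend_wq);
	return 0;
}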
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9a3e6bbff27b..a4e855e3690e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -216,6 +216,7 @@ struct o2hb_region { | |||
216 | 216 | ||
217 | struct list_head hr_all_item; | 217 | struct list_head hr_all_item; |
218 | unsigned hr_unclean_stop:1, | 218 | unsigned hr_unclean_stop:1, |
219 | hr_aborted_start:1, | ||
219 | hr_item_pinned:1, | 220 | hr_item_pinned:1, |
220 | hr_item_dropped:1; | 221 | hr_item_dropped:1; |
221 | 222 | ||
@@ -254,6 +255,10 @@ struct o2hb_region { | |||
254 | * a more complete api that doesn't lead to this sort of fragility. */ | 255 | * a more complete api that doesn't lead to this sort of fragility. */ |
255 | atomic_t hr_steady_iterations; | 256 | atomic_t hr_steady_iterations; |
256 | 257 | ||
258 | /* terminate o2hb thread if it does not reach steady state | ||
259 | * (hr_steady_iterations == 0) within hr_unsteady_iterations */ | ||
260 | atomic_t hr_unsteady_iterations; | ||
261 | |||
257 | char hr_dev_name[BDEVNAME_SIZE]; | 262 | char hr_dev_name[BDEVNAME_SIZE]; |
258 | 263 | ||
259 | unsigned int hr_timeout_ms; | 264 | unsigned int hr_timeout_ms; |
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work) | |||
324 | 329 | ||
325 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) | 330 | static void o2hb_arm_write_timeout(struct o2hb_region *reg) |
326 | { | 331 | { |
332 | /* Arm writeout only after thread reaches steady state */ | ||
333 | if (atomic_read(&reg->hr_steady_iterations) != 0) | ||
334 | return; | ||
335 | |||
327 | mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", | 336 | mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", |
328 | O2HB_MAX_WRITE_TIMEOUT_MS); | 337 | O2HB_MAX_WRITE_TIMEOUT_MS); |
329 | 338 | ||
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg, | |||
537 | return read == computed; | 546 | return read == computed; |
538 | } | 547 | } |
539 | 548 | ||
540 | /* We want to make sure that nobody is heartbeating on top of us -- | 549 | /* |
541 | * this will help detect an invalid configuration. */ | 550 | * Compare the slot data with what we wrote in the last iteration. |
542 | static void o2hb_check_last_timestamp(struct o2hb_region *reg) | 551 | * If the match fails, print an appropriate error message. This is to |
552 | * detect errors like... another node heartbeating on the same slot, | ||
553 | * flaky device that is losing writes, etc. | ||
554 | * Returns 1 if check succeeds, 0 otherwise. | ||
555 | */ | ||
556 | static int o2hb_check_own_slot(struct o2hb_region *reg) | ||
543 | { | 557 | { |
544 | struct o2hb_disk_slot *slot; | 558 | struct o2hb_disk_slot *slot; |
545 | struct o2hb_disk_heartbeat_block *hb_block; | 559 | struct o2hb_disk_heartbeat_block *hb_block; |
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) | |||
548 | slot = &reg->hr_slots[o2nm_this_node()]; | 562 | slot = &reg->hr_slots[o2nm_this_node()]; |
549 | /* Don't check on our 1st timestamp */ | 563 | /* Don't check on our 1st timestamp */ |
550 | if (!slot->ds_last_time) | 564 | if (!slot->ds_last_time) |
551 | return; | 565 | return 0; |
552 | 566 | ||
553 | hb_block = slot->ds_raw_block; | 567 | hb_block = slot->ds_raw_block; |
554 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && | 568 | if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && |
555 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && | 569 | le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && |
556 | hb_block->hb_node == slot->ds_node_num) | 570 | hb_block->hb_node == slot->ds_node_num) |
557 | return; | 571 | return 1; |
558 | 572 | ||
559 | #define ERRSTR1 "Another node is heartbeating on device" | 573 | #define ERRSTR1 "Another node is heartbeating on device" |
560 | #define ERRSTR2 "Heartbeat generation mismatch on device" | 574 | #define ERRSTR2 "Heartbeat generation mismatch on device" |
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) | |||
574 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, | 588 | (unsigned long long)slot->ds_last_time, hb_block->hb_node, |
575 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), | 589 | (unsigned long long)le64_to_cpu(hb_block->hb_generation), |
576 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); | 590 | (unsigned long long)le64_to_cpu(hb_block->hb_seq)); |
591 | |||
592 | return 0; | ||
577 | } | 593 | } |
578 | 594 | ||
579 | static inline void o2hb_prepare_block(struct o2hb_region *reg, | 595 | static inline void o2hb_prepare_block(struct o2hb_region *reg, |
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) | |||
719 | o2nm_node_put(node); | 735 | o2nm_node_put(node); |
720 | } | 736 | } |
721 | 737 | ||
722 | static void o2hb_set_quorum_device(struct o2hb_region *reg, | 738 | static void o2hb_set_quorum_device(struct o2hb_region *reg) |
723 | struct o2hb_disk_slot *slot) | ||
724 | { | 739 | { |
725 | assert_spin_locked(&o2hb_live_lock); | ||
726 | |||
727 | if (!o2hb_global_heartbeat_active()) | 740 | if (!o2hb_global_heartbeat_active()) |
728 | return; | 741 | return; |
729 | 742 | ||
730 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | 743 | /* Prevent race with o2hb_heartbeat_group_drop_item() */ |
744 | if (kthread_should_stop()) | ||
745 | return; | ||
746 | |||
747 | /* Tag region as quorum only after thread reaches steady state */ | ||
748 | if (atomic_read(®->hr_steady_iterations) != 0) | ||
731 | return; | 749 | return; |
732 | 750 | ||
751 | spin_lock(&o2hb_live_lock); | ||
752 | |||
753 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
754 | goto unlock; | ||
755 | |||
733 | /* | 756 | /* |
734 | * A region can be added to the quorum only when it sees all | 757 | * A region can be added to the quorum only when it sees all |
735 | * live nodes heartbeat on it. In other words, the region has been | 758 | * live nodes heartbeat on it. In other words, the region has been |
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, | |||
737 | */ | 760 | */ |
738 | if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, | 761 | if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, |
739 | sizeof(o2hb_live_node_bitmap))) | 762 | sizeof(o2hb_live_node_bitmap))) |
740 | return; | 763 | goto unlock; |
741 | |||
742 | if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD) | ||
743 | return; | ||
744 | 764 | ||
745 | printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n", | 765 | printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n", |
746 | config_item_name(&reg->hr_item)); | 766 | config_item_name(&reg->hr_item), reg->hr_dev_name); |
747 | 767 | ||
748 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | 768 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); |
749 | 769 | ||
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, | |||
754 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | 774 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, |
755 | O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) | 775 | O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) |
756 | o2hb_region_unpin(NULL); | 776 | o2hb_region_unpin(NULL); |
777 | unlock: | ||
778 | spin_unlock(&o2hb_live_lock); | ||
757 | } | 779 | } |
758 | 780 | ||
759 | static int o2hb_check_slot(struct o2hb_region *reg, | 781 | static int o2hb_check_slot(struct o2hb_region *reg, |
@@ -925,8 +947,6 @@ fire_callbacks: | |||
925 | slot->ds_equal_samples = 0; | 947 | slot->ds_equal_samples = 0; |
926 | } | 948 | } |
927 | out: | 949 | out: |
928 | o2hb_set_quorum_device(reg, slot); | ||
929 | |||
930 | spin_unlock(&o2hb_live_lock); | 950 | spin_unlock(&o2hb_live_lock); |
931 | 951 | ||
932 | o2hb_run_event_list(&event); | 952 | o2hb_run_event_list(&event); |
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes, | |||
957 | 977 | ||
958 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | 978 | static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) |
959 | { | 979 | { |
960 | int i, ret, highest_node, change = 0; | 980 | int i, ret, highest_node; |
981 | int membership_change = 0, own_slot_ok = 0; | ||
961 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 982 | unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
962 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 983 | unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
963 | struct o2hb_bio_wait_ctxt write_wc; | 984 | struct o2hb_bio_wait_ctxt write_wc; |
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
966 | sizeof(configured_nodes)); | 987 | sizeof(configured_nodes)); |
967 | if (ret) { | 988 | if (ret) { |
968 | mlog_errno(ret); | 989 | mlog_errno(ret); |
969 | return ret; | 990 | goto bail; |
970 | } | 991 | } |
971 | 992 | ||
972 | /* | 993 | /* |
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
982 | 1003 | ||
983 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); | 1004 | highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); |
984 | if (highest_node >= O2NM_MAX_NODES) { | 1005 | if (highest_node >= O2NM_MAX_NODES) { |
985 | mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n"); | 1006 | mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); |
986 | return -EINVAL; | 1007 | ret = -EINVAL; |
1008 | goto bail; | ||
987 | } | 1009 | } |
988 | 1010 | ||
989 | /* No sense in reading the slots of nodes that don't exist | 1011 | /* No sense in reading the slots of nodes that don't exist |
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
993 | ret = o2hb_read_slots(reg, highest_node + 1); | 1015 | ret = o2hb_read_slots(reg, highest_node + 1); |
994 | if (ret < 0) { | 1016 | if (ret < 0) { |
995 | mlog_errno(ret); | 1017 | mlog_errno(ret); |
996 | return ret; | 1018 | goto bail; |
997 | } | 1019 | } |
998 | 1020 | ||
999 | /* With an up to date view of the slots, we can check that no | 1021 | /* With an up to date view of the slots, we can check that no |
1000 | * other node has been improperly configured to heartbeat in | 1022 | * other node has been improperly configured to heartbeat in |
1001 | * our slot. */ | 1023 | * our slot. */ |
1002 | o2hb_check_last_timestamp(reg); | 1024 | own_slot_ok = o2hb_check_own_slot(reg); |
1003 | 1025 | ||
1004 | /* fill in the proper info for our next heartbeat */ | 1026 | /* fill in the proper info for our next heartbeat */ |
1005 | o2hb_prepare_block(reg, reg->hr_generation); | 1027 | o2hb_prepare_block(reg, reg->hr_generation); |
1006 | 1028 | ||
1007 | /* And fire off the write. Note that we don't wait on this I/O | ||
1008 | * until later. */ | ||
1009 | ret = o2hb_issue_node_write(reg, &write_wc); | 1029 | ret = o2hb_issue_node_write(reg, &write_wc); |
1010 | if (ret < 0) { | 1030 | if (ret < 0) { |
1011 | mlog_errno(ret); | 1031 | mlog_errno(ret); |
1012 | return ret; | 1032 | goto bail; |
1013 | } | 1033 | } |
1014 | 1034 | ||
1015 | i = -1; | 1035 | i = -1; |
1016 | while((i = find_next_bit(configured_nodes, | 1036 | while((i = find_next_bit(configured_nodes, |
1017 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { | 1037 | O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { |
1018 | change |= o2hb_check_slot(reg, &reg->hr_slots[i]); | 1038 | membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]); |
1019 | } | 1039 | } |
1020 | 1040 | ||
1021 | /* | 1041 | /* |
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) | |||
1030 | * disk */ | 1050 | * disk */ |
1031 | mlog(ML_ERROR, "Write error %d on device \"%s\"\n", | 1051 | mlog(ML_ERROR, "Write error %d on device \"%s\"\n", |
1032 | write_wc.wc_error, reg->hr_dev_name); | 1052 | write_wc.wc_error, reg->hr_dev_name); |
1033 | return write_wc.wc_error; | 1053 | ret = write_wc.wc_error; |
1054 | goto bail; | ||
1034 | } | 1055 | } |
1035 | 1056 | ||
1036 | o2hb_arm_write_timeout(reg); | 1057 | /* Skip disarming the timeout if own slot has stale/bad data */ |
1058 | if (own_slot_ok) { | ||
1059 | o2hb_set_quorum_device(reg); | ||
1060 | o2hb_arm_write_timeout(reg); | ||
1061 | } | ||
1037 | 1062 | ||
1063 | bail: | ||
1038 | /* let the person who launched us know when things are steady */ | 1064 | /* let the person who launched us know when things are steady */ |
1039 | if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) { | 1065 | if (!ret && own_slot_ok && !membership_change) { |
1040 | if (atomic_dec_and_test(&reg->hr_steady_iterations)) | 1066 | if (atomic_dec_and_test(&reg->hr_steady_iterations)) |
1067 | if (atomic_dec_and_test(®->hr_steady_iterations)) | ||
1068 | wake_up(&o2hb_steady_queue); | ||
1069 | } | ||
1070 | } | ||
1071 | |||
1072 | if (atomic_read(&reg->hr_steady_iterations) != 0) { | ||
1073 | if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) { | ||
1074 | printk(KERN_NOTICE "o2hb: Unable to stabilize " | ||
1075 | "heartbeat on region %s (%s)\n", | ||
1076 | config_item_name(&reg->hr_item), | ||
1077 | reg->hr_dev_name); | ||
1078 | atomic_set(&reg->hr_steady_iterations, 0); | ||
1079 | reg->hr_aborted_start = 1; | ||
1041 | wake_up(&o2hb_steady_queue); | 1080 | wake_up(&o2hb_steady_queue); |
1081 | ret = -EIO; | ||
1082 | } | ||
1042 | } | 1083 | } |
1043 | 1084 | ||
1044 | return 0; | 1085 | return ret; |
1045 | } | 1086 | } |
1046 | 1087 | ||
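The rewritten tail of o2hb_do_disk_heartbeat() splits startup accounting in two: hr_steady_iterations counts down only on a fully clean pass (no I/O error, own slot intact, no membership change), while hr_unsteady_iterations counts down on every pass that is still unsteady and aborts the start with -EIO once exhausted. A standalone sketch of the two-counter convergence test:

#include <stdio.h>

/* 'steady' must reach 0 via clean passes before the overall 'unsteady'
 * budget runs out -- modeled on hr_steady_iterations and
 * hr_unsteady_iterations above */
static int heartbeat_converges(int steady, int unsteady,
			       const int clean_pass[], int passes)
{
	for (int i = 0; i < passes && steady > 0; i++) {
		if (clean_pass[i] && --steady == 0)
			return 1;	/* reached steady state */
		if (--unsteady == 0)
			return 0;	/* start aborted, -EIO */
	}
	return steady == 0;
}

int main(void)
{
	const int passes[] = { 1, 0, 1, 1 };

	printf("%s\n", heartbeat_converges(3, 6, passes, 4) ?
	       "steady" : "aborted");
	return 0;
}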
1047 | /* Subtract b from a, storing the result in a. a *must* have a larger | 1088 | /* Subtract b from a, storing the result in a. a *must* have a larger |
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data) | |||
1095 | /* Pin node */ | 1136 | /* Pin node */ |
1096 | o2nm_depend_this_node(); | 1137 | o2nm_depend_this_node(); |
1097 | 1138 | ||
1098 | while (!kthread_should_stop() && !reg->hr_unclean_stop) { | 1139 | while (!kthread_should_stop() && |
1140 | !reg->hr_unclean_stop && !reg->hr_aborted_start) { | ||
1099 | /* We track the time spent inside | 1141 | /* We track the time spent inside |
1100 | * o2hb_do_disk_heartbeat so that we avoid more than | 1142 | * o2hb_do_disk_heartbeat so that we avoid more than |
1101 | * hr_timeout_ms between disk writes. On busy systems | 1143 | * hr_timeout_ms between disk writes. On busy systems |
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data) | |||
1103 | * likely to time itself out. */ | 1145 | * likely to time itself out. */ |
1104 | do_gettimeofday(&before_hb); | 1146 | do_gettimeofday(&before_hb); |
1105 | 1147 | ||
1106 | i = 0; | 1148 | ret = o2hb_do_disk_heartbeat(reg); |
1107 | do { | ||
1108 | ret = o2hb_do_disk_heartbeat(reg); | ||
1109 | } while (ret && ++i < 2); | ||
1110 | 1149 | ||
1111 | do_gettimeofday(&after_hb); | 1150 | do_gettimeofday(&after_hb); |
1112 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); | 1151 | elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); |
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data) | |||
1117 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, | 1156 | after_hb.tv_sec, (unsigned long) after_hb.tv_usec, |
1118 | elapsed_msec); | 1157 | elapsed_msec); |
1119 | 1158 | ||
1120 | if (elapsed_msec < reg->hr_timeout_ms) { | 1159 | if (!kthread_should_stop() && |
1160 | elapsed_msec < reg->hr_timeout_ms) { | ||
1121 | /* the kthread api has blocked signals for us so no | 1161 | /* the kthread api has blocked signals for us so no |
1122 | * need to record the return value. */ | 1162 | * need to record the return value. */ |
1123 | msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); | 1163 | msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); |
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data) | |||
1134 | * to timeout on this region when we could just as easily | 1174 | * to timeout on this region when we could just as easily |
1135 | * write a clear generation - thus indicating to them that | 1175 | * write a clear generation - thus indicating to them that |
1136 | * this node has left this region. | 1176 | * this node has left this region. |
1137 | * | 1177 | */ |
1138 | * XXX: Should we skip this on unclean_stop? */ | 1178 | if (!reg->hr_unclean_stop && !reg->hr_aborted_start) { |
1139 | o2hb_prepare_block(reg, 0); | 1179 | o2hb_prepare_block(reg, 0); |
1140 | ret = o2hb_issue_node_write(reg, &write_wc); | 1180 | ret = o2hb_issue_node_write(reg, &write_wc); |
1141 | if (ret == 0) { | 1181 | if (ret == 0) |
1142 | o2hb_wait_on_io(reg, &write_wc); | 1182 | o2hb_wait_on_io(reg, &write_wc); |
1143 | } else { | 1183 | else |
1144 | mlog_errno(ret); | 1184 | mlog_errno(ret); |
1145 | } | 1185 | } |
1146 | 1186 | ||
1147 | /* Unpin node */ | 1187 | /* Unpin node */ |
1148 | o2nm_undepend_this_node(); | 1188 | o2nm_undepend_this_node(); |
1149 | 1189 | ||
1150 | mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); | 1190 | mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n"); |
1151 | 1191 | ||
1152 | return 0; | 1192 | return 0; |
1153 | } | 1193 | } |
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) | |||
1158 | struct o2hb_debug_buf *db = inode->i_private; | 1198 | struct o2hb_debug_buf *db = inode->i_private; |
1159 | struct o2hb_region *reg; | 1199 | struct o2hb_region *reg; |
1160 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 1200 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
1201 | unsigned long lts; | ||
1161 | char *buf = NULL; | 1202 | char *buf = NULL; |
1162 | int i = -1; | 1203 | int i = -1; |
1163 | int out = 0; | 1204 | int out = 0; |
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) | |||
1194 | 1235 | ||
1195 | case O2HB_DB_TYPE_REGION_ELAPSED_TIME: | 1236 | case O2HB_DB_TYPE_REGION_ELAPSED_TIME: |
1196 | reg = (struct o2hb_region *)db->db_data; | 1237 | reg = (struct o2hb_region *)db->db_data; |
1197 | out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", | 1238 | lts = reg->hr_last_timeout_start; |
1198 | jiffies_to_msecs(jiffies - | 1239 | /* If 0, it has never been set before */ |
1199 | reg->hr_last_timeout_start)); | 1240 | if (lts) |
1241 | lts = jiffies_to_msecs(jiffies - lts); | ||
1242 | out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts); | ||
1200 | goto done; | 1243 | goto done; |
1201 | 1244 | ||
1202 | case O2HB_DB_TYPE_REGION_PINNED: | 1245 | case O2HB_DB_TYPE_REGION_PINNED: |
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item) | |||
1426 | struct page *page; | 1469 | struct page *page; |
1427 | struct o2hb_region *reg = to_o2hb_region(item); | 1470 | struct o2hb_region *reg = to_o2hb_region(item); |
1428 | 1471 | ||
1472 | mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name); | ||
1473 | |||
1429 | if (reg->hr_tmp_block) | 1474 | if (reg->hr_tmp_block) |
1430 | kfree(reg->hr_tmp_block); | 1475 | kfree(reg->hr_tmp_block); |
1431 | 1476 | ||
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1792 | live_threshold <<= 1; | 1837 | live_threshold <<= 1; |
1793 | spin_unlock(&o2hb_live_lock); | 1838 | spin_unlock(&o2hb_live_lock); |
1794 | } | 1839 | } |
1795 | atomic_set(&reg->hr_steady_iterations, live_threshold + 1); | 1840 | ++live_threshold; |
1841 | atomic_set(&reg->hr_steady_iterations, live_threshold); | ||
1842 | /* unsteady_iterations is double the steady_iterations */ | ||
1843 | atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1)); | ||
1796 | 1844 | ||
1797 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", | 1845 | hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", |
1798 | reg->hr_item.ci_name); | 1846 | reg->hr_item.ci_name); |
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1809 | ret = wait_event_interruptible(o2hb_steady_queue, | 1857 | ret = wait_event_interruptible(o2hb_steady_queue, |
1810 | atomic_read(&reg->hr_steady_iterations) == 0); | 1858 | atomic_read(&reg->hr_steady_iterations) == 0); |
1811 | if (ret) { | 1859 | if (ret) { |
1812 | /* We got interrupted (hello ptrace!). Clean up */ | 1860 | atomic_set(&reg->hr_steady_iterations, 0); |
1813 | spin_lock(&o2hb_live_lock); | 1861 | reg->hr_aborted_start = 1; |
1814 | hb_task = reg->hr_task; | 1862 | } |
1815 | reg->hr_task = NULL; | ||
1816 | spin_unlock(&o2hb_live_lock); | ||
1817 | 1863 | ||
1818 | if (hb_task) | 1864 | if (reg->hr_aborted_start) { |
1819 | kthread_stop(hb_task); | 1865 | ret = -EIO; |
1820 | goto out; | 1866 | goto out; |
1821 | } | 1867 | } |
1822 | 1868 | ||
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1833 | ret = -EIO; | 1879 | ret = -EIO; |
1834 | 1880 | ||
1835 | if (hb_task && o2hb_global_heartbeat_active()) | 1881 | if (hb_task && o2hb_global_heartbeat_active()) |
1836 | printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n", | 1882 | printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n", |
1837 | config_item_name(&reg->hr_item)); | 1883 | config_item_name(&reg->hr_item), reg->hr_dev_name); |
1838 | 1884 | ||
1839 | out: | 1885 | out: |
1840 | if (filp) | 1886 | if (filp) |
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2092 | 2138 | ||
2093 | /* stop the thread when the user removes the region dir */ | 2139 | /* stop the thread when the user removes the region dir */ |
2094 | spin_lock(&o2hb_live_lock); | 2140 | spin_lock(&o2hb_live_lock); |
2095 | if (o2hb_global_heartbeat_active()) { | ||
2096 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | ||
2097 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | ||
2098 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
2099 | quorum_region = 1; | ||
2100 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | ||
2101 | } | ||
2102 | hb_task = reg->hr_task; | 2141 | hb_task = reg->hr_task; |
2103 | reg->hr_task = NULL; | 2142 | reg->hr_task = NULL; |
2104 | reg->hr_item_dropped = 1; | 2143 | reg->hr_item_dropped = 1; |
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2107 | if (hb_task) | 2146 | if (hb_task) |
2108 | kthread_stop(hb_task); | 2147 | kthread_stop(hb_task); |
2109 | 2148 | ||
2149 | if (o2hb_global_heartbeat_active()) { | ||
2150 | spin_lock(&o2hb_live_lock); | ||
2151 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | ||
2152 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | ||
2153 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
2154 | quorum_region = 1; | ||
2155 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | ||
2156 | spin_unlock(&o2hb_live_lock); | ||
2157 | printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n", | ||
2158 | ((atomic_read(®->hr_steady_iterations) == 0) ? | ||
2159 | "stopped" : "start aborted"), config_item_name(item), | ||
2160 | reg->hr_dev_name); | ||
2161 | } | ||
2162 | |||
2110 | /* | 2163 | /* |
2111 | * If we're racing a dev_write(), we need to wake them. They will | 2164 | * If we're racing a dev_write(), we need to wake them. They will |
2112 | * check reg->hr_task | 2165 | * check reg->hr_task |
2113 | */ | 2166 | */ |
2114 | if (atomic_read(&reg->hr_steady_iterations) != 0) { | 2167 | if (atomic_read(&reg->hr_steady_iterations) != 0) { |
2168 | reg->hr_aborted_start = 1; | ||
2115 | atomic_set(&reg->hr_steady_iterations, 0); | 2169 | atomic_set(&reg->hr_steady_iterations, 0); |
2116 | wake_up(&o2hb_steady_queue); | 2170 | wake_up(&o2hb_steady_queue); |
2117 | } | 2171 | } |
2118 | 2172 | ||
2119 | if (o2hb_global_heartbeat_active()) | ||
2120 | printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", | ||
2121 | config_item_name(&reg->hr_item)); | ||
2122 | |||
2123 | config_item_put(item); | 2173 | config_item_put(item); |
2124 | 2174 | ||
2125 | if (!o2hb_global_heartbeat_active() || !quorum_region) | 2175 | if (!o2hb_global_heartbeat_active() || !quorum_region) |
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 3a5835904b3d..dc45deb19e68 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #define SC_DEBUG_NAME "sock_containers" | 47 | #define SC_DEBUG_NAME "sock_containers" |
48 | #define NST_DEBUG_NAME "send_tracking" | 48 | #define NST_DEBUG_NAME "send_tracking" |
49 | #define STATS_DEBUG_NAME "stats" | 49 | #define STATS_DEBUG_NAME "stats" |
50 | #define NODES_DEBUG_NAME "connected_nodes" | ||
50 | 51 | ||
51 | #define SHOW_SOCK_CONTAINERS 0 | 52 | #define SHOW_SOCK_CONTAINERS 0 |
52 | #define SHOW_SOCK_STATS 1 | 53 | #define SHOW_SOCK_STATS 1 |
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry; | |||
55 | static struct dentry *sc_dentry; | 56 | static struct dentry *sc_dentry; |
56 | static struct dentry *nst_dentry; | 57 | static struct dentry *nst_dentry; |
57 | static struct dentry *stats_dentry; | 58 | static struct dentry *stats_dentry; |
59 | static struct dentry *nodes_dentry; | ||
58 | 60 | ||
59 | static DEFINE_SPINLOCK(o2net_debug_lock); | 61 | static DEFINE_SPINLOCK(o2net_debug_lock); |
60 | 62 | ||
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = { | |||
491 | .release = sc_fop_release, | 493 | .release = sc_fop_release, |
492 | }; | 494 | }; |
493 | 495 | ||
494 | int o2net_debugfs_init(void) | 496 | static int o2net_fill_bitmap(char *buf, int len) |
495 | { | 497 | { |
496 | o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); | 498 | unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
497 | if (!o2net_dentry) { | 499 | int i = -1, out = 0; |
498 | mlog_errno(-ENOMEM); | ||
499 | goto bail; | ||
500 | } | ||
501 | 500 | ||
502 | nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR, | 501 | o2net_fill_node_map(map, sizeof(map)); |
503 | o2net_dentry, NULL, | ||
504 | &nst_seq_fops); | ||
505 | if (!nst_dentry) { | ||
506 | mlog_errno(-ENOMEM); | ||
507 | goto bail; | ||
508 | } | ||
509 | 502 | ||
510 | sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR, | 503 | while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) |
511 | o2net_dentry, NULL, | 504 | out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); |
512 | &sc_seq_fops); | 505 | out += snprintf(buf + out, PAGE_SIZE - out, "\n"); |
513 | if (!sc_dentry) { | ||
514 | mlog_errno(-ENOMEM); | ||
515 | goto bail; | ||
516 | } | ||
517 | 506 | ||
518 | stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, | 507 | return out; |
519 | o2net_dentry, NULL, | 508 | } |
520 | &stats_seq_fops); | 509 | |
521 | if (!stats_dentry) { | 510 | static int nodes_fop_open(struct inode *inode, struct file *file) |
522 | mlog_errno(-ENOMEM); | 511 | { |
523 | goto bail; | 512 | char *buf; |
524 | } | 513 | |
514 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
515 | if (!buf) | ||
516 | return -ENOMEM; | ||
517 | |||
518 | i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE)); | ||
519 | |||
520 | file->private_data = buf; | ||
525 | 521 | ||
526 | return 0; | 522 | return 0; |
527 | bail: | ||
528 | debugfs_remove(stats_dentry); | ||
529 | debugfs_remove(sc_dentry); | ||
530 | debugfs_remove(nst_dentry); | ||
531 | debugfs_remove(o2net_dentry); | ||
532 | return -ENOMEM; | ||
533 | } | 523 | } |
534 | 524 | ||
525 | static int o2net_debug_release(struct inode *inode, struct file *file) | ||
526 | { | ||
527 | kfree(file->private_data); | ||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | static ssize_t o2net_debug_read(struct file *file, char __user *buf, | ||
532 | size_t nbytes, loff_t *ppos) | ||
533 | { | ||
534 | return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, | ||
535 | i_size_read(file->f_mapping->host)); | ||
536 | } | ||
537 | |||
538 | static const struct file_operations nodes_fops = { | ||
539 | .open = nodes_fop_open, | ||
540 | .release = o2net_debug_release, | ||
541 | .read = o2net_debug_read, | ||
542 | .llseek = generic_file_llseek, | ||
543 | }; | ||
544 | |||
535 | void o2net_debugfs_exit(void) | 545 | void o2net_debugfs_exit(void) |
536 | { | 546 | { |
547 | debugfs_remove(nodes_dentry); | ||
537 | debugfs_remove(stats_dentry); | 548 | debugfs_remove(stats_dentry); |
538 | debugfs_remove(sc_dentry); | 549 | debugfs_remove(sc_dentry); |
539 | debugfs_remove(nst_dentry); | 550 | debugfs_remove(nst_dentry); |
540 | debugfs_remove(o2net_dentry); | 551 | debugfs_remove(o2net_dentry); |
541 | } | 552 | } |
542 | 553 | ||
554 | int o2net_debugfs_init(void) | ||
555 | { | ||
556 | mode_t mode = S_IFREG|S_IRUSR; | ||
557 | |||
558 | o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); | ||
559 | if (o2net_dentry) | ||
560 | nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode, | ||
561 | o2net_dentry, NULL, &nst_seq_fops); | ||
562 | if (nst_dentry) | ||
563 | sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode, | ||
564 | o2net_dentry, NULL, &sc_seq_fops); | ||
565 | if (sc_dentry) | ||
566 | stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode, | ||
567 | o2net_dentry, NULL, &stats_seq_fops); | ||
568 | if (stats_dentry) | ||
569 | nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode, | ||
570 | o2net_dentry, NULL, &nodes_fops); | ||
571 | if (nodes_dentry) | ||
572 | return 0; | ||
573 | |||
574 | o2net_debugfs_exit(); | ||
575 | mlog_errno(-ENOMEM); | ||
576 | return -ENOMEM; | ||
577 | } | ||
578 | |||
543 | #endif /* CONFIG_DEBUG_FS */ | 579 | #endif /* CONFIG_DEBUG_FS */ |
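The new nodes file follows a simple one-shot snapshot pattern: build the whole text at open(), publish its length with i_size_write(), serve reads from the cached buffer, and free it at release(). A minimal sketch of the same pattern with illustrative names (not part of the patch):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int example_open(struct inode *inode, struct file *file)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* snapshot once; every subsequent read sees the same text */
	i_size_write(inode, scnprintf(buf, PAGE_SIZE, "example state\n"));
	file->private_data = buf;
	return 0;
}

static ssize_t example_read(struct file *file, char __user *ubuf,
			    size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, nbytes, ppos,
				       file->private_data,
				       i_size_read(file->f_mapping->host));
}

static int example_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= example_read,
	.release	= example_release,
	.llseek		= generic_file_llseek,
};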
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index ad7d0c155de4..044e7b58d31c 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, | |||
546 | } | 546 | } |
547 | 547 | ||
548 | if (was_valid && !valid) { | 548 | if (was_valid && !valid) { |
549 | printk(KERN_NOTICE "o2net: no longer connected to " | 549 | printk(KERN_NOTICE "o2net: No longer connected to " |
550 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); | 550 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); |
551 | o2net_complete_nodes_nsw(nn); | 551 | o2net_complete_nodes_nsw(nn); |
552 | } | 552 | } |
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, | |||
556 | cancel_delayed_work(&nn->nn_connect_expired); | 556 | cancel_delayed_work(&nn->nn_connect_expired); |
557 | printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", | 557 | printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", |
558 | o2nm_this_node() > sc->sc_node->nd_num ? | 558 | o2nm_this_node() > sc->sc_node->nd_num ? |
559 | "connected to" : "accepted connection from", | 559 | "Connected to" : "Accepted connection from", |
560 | SC_NODEF_ARGS(sc)); | 560 | SC_NODEF_ARGS(sc)); |
561 | } | 561 | } |
562 | 562 | ||
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk) | |||
644 | o2net_sc_queue_work(sc, &sc->sc_connect_work); | 644 | o2net_sc_queue_work(sc, &sc->sc_connect_work); |
645 | break; | 645 | break; |
646 | default: | 646 | default: |
647 | printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT | 647 | printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT |
648 | " shutdown, state %d\n", | 648 | " shutdown, state %d\n", |
649 | SC_NODEF_ARGS(sc), sk->sk_state); | 649 | SC_NODEF_ARGS(sc), sk->sk_state); |
650 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); | 650 | o2net_sc_queue_work(sc, &sc->sc_shutdown_work); |
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn, | |||
1035 | return ret; | 1035 | return ret; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | /* Get a map of all nodes to which this node is currently connected */ | ||
1039 | void o2net_fill_node_map(unsigned long *map, unsigned bytes) | ||
1040 | { | ||
1041 | struct o2net_sock_container *sc; | ||
1042 | int node, ret; | ||
1043 | |||
1044 | BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))); | ||
1045 | |||
1046 | memset(map, 0, bytes); | ||
1047 | for (node = 0; node < O2NM_MAX_NODES; ++node) { | ||
1048 | o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret); | ||
1049 | if (!ret) { | ||
1050 | set_bit(node, map); | ||
1051 | sc_put(sc); | ||
1052 | } | ||
1053 | } | ||
1054 | } | ||
1055 | EXPORT_SYMBOL_GPL(o2net_fill_node_map); | ||
1056 | |||
1038 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, | 1057 | int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, |
1039 | size_t caller_veclen, u8 target_node, int *status) | 1058 | size_t caller_veclen, u8 target_node, int *status) |
1040 | { | 1059 | { |
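o2net_fill_node_map() requires a bitmap of at least BITS_TO_LONGS(O2NM_MAX_NODES) longs; the BUG_ON rejects anything smaller. A hypothetical caller, assuming the o2net/o2nm headers are in scope and not part of this commit, would look like:

#include <linux/bitops.h>
#include <linux/kernel.h>

static void example_log_connected_nodes(void)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int node;

	/* sizeof(map) satisfies the size check above */
	o2net_fill_node_map(map, sizeof(map));

	for_each_set_bit(node, map, O2NM_MAX_NODES)
		printk(KERN_INFO "o2net: connected to node %d\n", node);
}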
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) | |||
1285 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 1304 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1286 | 1305 | ||
1287 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { | 1306 | if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { |
1288 | mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " | 1307 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net " |
1289 | "version %llu but %llu is required, disconnecting\n", | 1308 | "protocol version %llu but %llu is required. " |
1290 | SC_NODEF_ARGS(sc), | 1309 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1291 | (unsigned long long)be64_to_cpu(hand->protocol_version), | 1310 | (unsigned long long)be64_to_cpu(hand->protocol_version), |
1292 | O2NET_PROTOCOL_VERSION); | 1311 | O2NET_PROTOCOL_VERSION); |
1293 | 1312 | ||
1294 | /* don't bother reconnecting if it's the wrong version. */ | 1313 | /* don't bother reconnecting if it's the wrong version. */
1295 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1314 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) | |||
1303 | */ | 1322 | */ |
1304 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != | 1323 | if (be32_to_cpu(hand->o2net_idle_timeout_ms) != |
1305 | o2net_idle_timeout()) { | 1324 | o2net_idle_timeout()) { |
1306 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " | 1325 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network " |
1307 | "%u ms, but we use %u ms locally. disconnecting\n", | 1326 | "idle timeout of %u ms, but we use %u ms locally. " |
1308 | SC_NODEF_ARGS(sc), | 1327 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1309 | be32_to_cpu(hand->o2net_idle_timeout_ms), | 1328 | be32_to_cpu(hand->o2net_idle_timeout_ms), |
1310 | o2net_idle_timeout()); | 1329 | o2net_idle_timeout()); |
1311 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1330 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1312 | return -1; | 1331 | return -1; |
1313 | } | 1332 | } |
1314 | 1333 | ||
1315 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != | 1334 | if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != |
1316 | o2net_keepalive_delay()) { | 1335 | o2net_keepalive_delay()) { |
1317 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " | 1336 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive " |
1318 | "%u ms, but we use %u ms locally. disconnecting\n", | 1337 | "delay of %u ms, but we use %u ms locally. " |
1319 | SC_NODEF_ARGS(sc), | 1338 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1320 | be32_to_cpu(hand->o2net_keepalive_delay_ms), | 1339 | be32_to_cpu(hand->o2net_keepalive_delay_ms), |
1321 | o2net_keepalive_delay()); | 1340 | o2net_keepalive_delay()); |
1322 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1341 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1323 | return -1; | 1342 | return -1; |
1324 | } | 1343 | } |
1325 | 1344 | ||
1326 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != | 1345 | if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != |
1327 | O2HB_MAX_WRITE_TIMEOUT_MS) { | 1346 | O2HB_MAX_WRITE_TIMEOUT_MS) { |
1328 | mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " | 1347 | printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat " |
1329 | "%u ms, but we use %u ms locally. disconnecting\n", | 1348 | "timeout of %u ms, but we use %u ms locally. " |
1330 | SC_NODEF_ARGS(sc), | 1349 | "Disconnecting.\n", SC_NODEF_ARGS(sc), |
1331 | be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), | 1350 | be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), |
1332 | O2HB_MAX_WRITE_TIMEOUT_MS); | 1351 | O2HB_MAX_WRITE_TIMEOUT_MS); |
1333 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); | 1352 | o2net_ensure_shutdown(nn, sc, -ENOTCONN); |
1334 | return -1; | 1353 | return -1; |
1335 | } | 1354 | } |
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data) | |||
1540 | { | 1559 | { |
1541 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; | 1560 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; |
1542 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 1561 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1543 | |||
1544 | #ifdef CONFIG_DEBUG_FS | 1562 | #ifdef CONFIG_DEBUG_FS |
1545 | ktime_t now = ktime_get(); | 1563 | unsigned long msecs = ktime_to_ms(ktime_get()) - |
1564 | ktime_to_ms(sc->sc_tv_timer); | ||
1565 | #else | ||
1566 | unsigned long msecs = o2net_idle_timeout(); | ||
1546 | #endif | 1567 | #endif |
1547 | 1568 | ||
1548 | printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " | 1569 | printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been " |
1549 | "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), | 1570 | "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc), |
1550 | o2net_idle_timeout() / 1000, | 1571 | msecs / 1000, msecs % 1000); |
1551 | o2net_idle_timeout() % 1000); | ||
1552 | |||
1553 | #ifdef CONFIG_DEBUG_FS | ||
1554 | mlog(ML_NOTICE, "Here are some times that might help debug the " | ||
1555 | "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, " | ||
1556 | "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n", | ||
1557 | (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now), | ||
1558 | (long long)ktime_to_us(sc->sc_tv_data_ready), | ||
1559 | (long long)ktime_to_us(sc->sc_tv_advance_start), | ||
1560 | (long long)ktime_to_us(sc->sc_tv_advance_stop), | ||
1561 | sc->sc_msg_key, sc->sc_msg_type, | ||
1562 | (long long)ktime_to_us(sc->sc_tv_func_start), | ||
1563 | (long long)ktime_to_us(sc->sc_tv_func_stop)); | ||
1564 | #endif | ||
1565 | 1572 | ||
1566 | /* | 1573 | /* |
1567 | * Initialize the nn_timeout so that the next connection attempt | 1574 | * Initialize the nn_timeout so that the next connection attempt |
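With debugfs enabled the message now reports the measured idle span (timer arm to now) rather than the configured timeout. One caveat worth noting: printing the millisecond remainder with plain "%lu.%lu" drops leading zeros, so 5005 ms reads as "5.5 secs"; a zero-padded format avoids that. A hedged sketch of the arithmetic, with an illustrative start timestamp:

#include <linux/kernel.h>
#include <linux/ktime.h>

static void example_report_idle(ktime_t start)
{
	unsigned long msecs = ktime_to_ms(ktime_get()) - ktime_to_ms(start);

	/* %03lu keeps the fraction correct, e.g. "5.005 secs" */
	printk(KERN_NOTICE "idle for %lu.%03lu secs\n",
	       msecs / 1000, msecs % 1000);
}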
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work) | |||
1694 | 1701 | ||
1695 | out: | 1702 | out: |
1696 | if (ret) { | 1703 | if (ret) { |
1697 | mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " | 1704 | printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT |
1698 | "with errno %d\n", SC_NODEF_ARGS(sc), ret); | 1705 | " failed with errno %d\n", SC_NODEF_ARGS(sc), ret); |
1699 | /* 0 err so that another will be queued and attempted | 1706 | /* 0 err so that another will be queued and attempted |
1700 | * from set_nn_state */ | 1707 | * from set_nn_state */ |
1701 | if (sc) | 1708 | if (sc) |
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work) | |||
1718 | 1725 | ||
1719 | spin_lock(&nn->nn_lock); | 1726 | spin_lock(&nn->nn_lock); |
1720 | if (!nn->nn_sc_valid) { | 1727 | if (!nn->nn_sc_valid) { |
1721 | mlog(ML_ERROR, "no connection established with node %u after " | 1728 | printk(KERN_NOTICE "o2net: No connection established with " |
1722 | "%u.%u seconds, giving up and returning errors.\n", | 1729 | "node %u after %u.%u seconds, giving up.\n", |
1723 | o2net_num_from_nn(nn), | 1730 | o2net_num_from_nn(nn), |
1724 | o2net_idle_timeout() / 1000, | 1731 | o2net_idle_timeout() / 1000, |
1725 | o2net_idle_timeout() % 1000); | 1732 | o2net_idle_timeout() % 1000); |
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock) | |||
1862 | 1869 | ||
1863 | node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); | 1870 | node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); |
1864 | if (node == NULL) { | 1871 | if (node == NULL) { |
1865 | mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", | 1872 | printk(KERN_NOTICE "o2net: Attempt to connect from unknown " |
1866 | &sin.sin_addr.s_addr, ntohs(sin.sin_port)); | 1873 | "node at %pI4:%d\n", &sin.sin_addr.s_addr, |
1874 | ntohs(sin.sin_port)); | ||
1867 | ret = -EINVAL; | 1875 | ret = -EINVAL; |
1868 | goto out; | 1876 | goto out; |
1869 | } | 1877 | } |
1870 | 1878 | ||
1871 | if (o2nm_this_node() >= node->nd_num) { | 1879 | if (o2nm_this_node() >= node->nd_num) { |
1872 | local_node = o2nm_get_node_by_num(o2nm_this_node()); | 1880 | local_node = o2nm_get_node_by_num(o2nm_this_node()); |
1873 | mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' (" | 1881 | printk(KERN_NOTICE "o2net: Unexpected connect attempt seen " |
1874 | "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n", | 1882 | "at node '%s' (%u, %pI4:%d) from node '%s' (%u, " |
1875 | local_node->nd_name, local_node->nd_num, | 1883 | "%pI4:%d)\n", local_node->nd_name, local_node->nd_num, |
1876 | &(local_node->nd_ipv4_address), | 1884 | &(local_node->nd_ipv4_address), |
1877 | ntohs(local_node->nd_ipv4_port), | 1885 | ntohs(local_node->nd_ipv4_port), node->nd_name, |
1878 | node->nd_name, node->nd_num, &sin.sin_addr.s_addr, | 1886 | node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); |
1879 | ntohs(sin.sin_port)); | ||
1880 | ret = -EINVAL; | 1887 | ret = -EINVAL; |
1881 | goto out; | 1888 | goto out; |
1882 | } | 1889 | } |
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock) | |||
1901 | ret = 0; | 1908 | ret = 0; |
1902 | spin_unlock(&nn->nn_lock); | 1909 | spin_unlock(&nn->nn_lock); |
1903 | if (ret) { | 1910 | if (ret) { |
1904 | mlog(ML_NOTICE, "attempt to connect from node '%s' at " | 1911 | printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' " |
1905 | "%pI4:%d but it already has an open connection\n", | 1912 | "at %pI4:%d but it already has an open connection\n", |
1906 | node->nd_name, &sin.sin_addr.s_addr, | 1913 | node->nd_name, &sin.sin_addr.s_addr, |
1907 | ntohs(sin.sin_port)); | 1914 | ntohs(sin.sin_port)); |
1908 | goto out; | 1915 | goto out; |
1909 | } | 1916 | } |
1910 | 1917 | ||
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) | |||
1984 | 1991 | ||
1985 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | 1992 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); |
1986 | if (ret < 0) { | 1993 | if (ret < 0) { |
1987 | mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); | 1994 | printk(KERN_ERR "o2net: Error %d while creating socket\n", ret); |
1988 | goto out; | 1995 | goto out; |
1989 | } | 1996 | } |
1990 | 1997 | ||
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) | |||
2001 | sock->sk->sk_reuse = 1; | 2008 | sock->sk->sk_reuse = 1; |
2002 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | 2009 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
2003 | if (ret < 0) { | 2010 | if (ret < 0) { |
2004 | mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " | 2011 | printk(KERN_ERR "o2net: Error %d while binding socket at " |
2005 | "ret=%d\n", &addr, ntohs(port), ret); | 2012 | "%pI4:%u\n", ret, &addr, ntohs(port)); |
2006 | goto out; | 2013 | goto out; |
2007 | } | 2014 | } |
2008 | 2015 | ||
2009 | ret = sock->ops->listen(sock, 64); | 2016 | ret = sock->ops->listen(sock, 64); |
2010 | if (ret < 0) { | 2017 | if (ret < 0) |
2011 | mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", | 2018 | printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n", |
2012 | &addr, ntohs(port), ret); | 2019 | ret, &addr, ntohs(port)); |
2013 | } | ||
2014 | 2020 | ||
2015 | out: | 2021 | out: |
2016 | if (ret) { | 2022 | if (ret) { |
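These hunks only reword the diagnostics; the underlying sequence is the usual in-kernel create/bind/listen triple with a single cleanup point. Condensed, with illustrative names:

#include <linux/in.h>
#include <linux/net.h>
#include <linux/string.h>

static int example_open_listener(__be32 addr, __be16 port)
{
	struct socket *sock;
	struct sockaddr_in sin;
	int ret;

	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		return ret;		/* nothing to release yet */

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;
	sin.sin_port = port;

	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (!ret)
		ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		sock_release(sock);	/* one cleanup point for both */
	return ret;
}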
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h index fd6179eb26d4..5bada2a69b50 100644 --- a/fs/ocfs2/cluster/tcp.h +++ b/fs/ocfs2/cluster/tcp.h | |||
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, | |||
106 | struct list_head *unreg_list); | 106 | struct list_head *unreg_list); |
107 | void o2net_unregister_handler_list(struct list_head *list); | 107 | void o2net_unregister_handler_list(struct list_head *list); |
108 | 108 | ||
109 | void o2net_fill_node_map(unsigned long *map, unsigned bytes); | ||
110 | |||
109 | struct o2nm_node; | 111 | struct o2nm_node; |
110 | int o2net_register_hb_callbacks(void); | 112 | int o2net_register_hb_callbacks(void); |
111 | void o2net_unregister_hb_callbacks(void); | 113 | void o2net_unregister_hb_callbacks(void); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index e2878b5895fb..8fe4e2892ab9 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, | |||
1184 | if (pde) | 1184 | if (pde) |
1185 | le16_add_cpu(&pde->rec_len, | 1185 | le16_add_cpu(&pde->rec_len, |
1186 | le16_to_cpu(de->rec_len)); | 1186 | le16_to_cpu(de->rec_len)); |
1187 | else | 1187 | de->inode = 0; |
1188 | de->inode = 0; | ||
1189 | dir->i_version++; | 1188 | dir->i_version++; |
1190 | ocfs2_journal_dirty(handle, bh); | 1189 | ocfs2_journal_dirty(handle, bh); |
1191 | goto bail; | 1190 | goto bail; |
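This dir.c change is a correctness fix: deletion now always zeroes the victim's inode field, where previously the field survived whenever the record was merged into the preceding entry, leaving a stale but live-looking name for code that walks entries directly. In the classic linked-dirent layout the corrected logic is, schematically:

#include <linux/kernel.h>
#include <linux/types.h>

/* illustrative on-disk entry, not the real ocfs2_dir_entry */
struct example_dirent {
	__le64 inode;		/* 0 means unused */
	__le16 rec_len;		/* bytes to the next entry */
};

static void example_delete_entry(struct example_dirent *pde,
				 struct example_dirent *de)
{
	/* let the previous record absorb the deleted one's space ... */
	if (pde)
		le16_add_cpu(&pde->rec_len, le16_to_cpu(de->rec_len));
	/* ... but mark the entry free either way */
	de->inode = 0;
}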
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index d602abb51b61..a5952ceecba5 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | |||
859 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); | 859 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); |
860 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | 860 | void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); |
861 | int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); | 861 | int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); |
862 | int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); | 862 | void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); |
863 | int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); | 863 | void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); |
864 | 864 | ||
865 | void dlm_put(struct dlm_ctxt *dlm); | 865 | void dlm_put(struct dlm_ctxt *dlm); |
866 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); | 866 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); |
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res) | |||
877 | kref_get(&res->refs); | 877 | kref_get(&res->refs); |
878 | } | 878 | } |
879 | void dlm_lockres_put(struct dlm_lock_resource *res); | 879 | void dlm_lockres_put(struct dlm_lock_resource *res); |
880 | void __dlm_unhash_lockres(struct dlm_lock_resource *res); | 880 | void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); |
881 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | 881 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); |
882 | struct dlm_lock_resource *res); | ||
883 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | 882 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, |
884 | const char *name, | 883 | const char *name, |
885 | unsigned int len, | 884 | unsigned int len, |
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | |||
902 | const char *name, | 901 | const char *name, |
903 | unsigned int namelen); | 902 | unsigned int namelen); |
904 | 903 | ||
905 | #define dlm_lockres_set_refmap_bit(bit,res) \ | 904 | void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, |
906 | __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) | 905 | struct dlm_lock_resource *res, int bit); |
907 | #define dlm_lockres_clear_refmap_bit(bit,res) \ | 906 | void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, |
908 | __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) | 907 | struct dlm_lock_resource *res, int bit); |
909 | 908 | ||
910 | static inline void __dlm_lockres_set_refmap_bit(int bit, | 909 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
911 | struct dlm_lock_resource *res, | 910 | struct dlm_lock_resource *res); |
912 | const char *file, | 911 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, |
913 | int line) | 912 | struct dlm_lock_resource *res); |
914 | { | ||
915 | //printk("%s:%d:%.*s: setting bit %d\n", file, line, | ||
916 | // res->lockname.len, res->lockname.name, bit); | ||
917 | set_bit(bit, res->refmap); | ||
918 | } | ||
919 | |||
920 | static inline void __dlm_lockres_clear_refmap_bit(int bit, | ||
921 | struct dlm_lock_resource *res, | ||
922 | const char *file, | ||
923 | int line) | ||
924 | { | ||
925 | //printk("%s:%d:%.*s: clearing bit %d\n", file, line, | ||
926 | // res->lockname.len, res->lockname.name, bit); | ||
927 | clear_bit(bit, res->refmap); | ||
928 | } | ||
929 | |||
930 | void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | ||
931 | struct dlm_lock_resource *res, | ||
932 | const char *file, | ||
933 | int line); | ||
934 | void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
935 | struct dlm_lock_resource *res, | ||
936 | int new_lockres, | ||
937 | const char *file, | ||
938 | int line); | ||
939 | #define dlm_lockres_drop_inflight_ref(d,r) \ | ||
940 | __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__) | ||
941 | #define dlm_lockres_grab_inflight_ref(d,r) \ | ||
942 | __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__) | ||
943 | #define dlm_lockres_grab_inflight_ref_new(d,r) \ | ||
944 | __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__) | ||
945 | 913 | ||
946 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 914 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
947 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | 915 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 6ed6b95dcf93..92f2ead0fab6 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing, | |||
157 | 157 | ||
158 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); | 158 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); |
159 | 159 | ||
160 | void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) | 160 | void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
161 | { | 161 | { |
162 | if (!hlist_unhashed(&lockres->hash_node)) { | 162 | if (hlist_unhashed(&res->hash_node)) |
163 | hlist_del_init(&lockres->hash_node); | 163 | return; |
164 | dlm_lockres_put(lockres); | 164 | |
165 | } | 165 | mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len, |
166 | res->lockname.name); | ||
167 | hlist_del_init(&res->hash_node); | ||
168 | dlm_lockres_put(res); | ||
166 | } | 169 | } |
167 | 170 | ||
168 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | 171 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
169 | struct dlm_lock_resource *res) | ||
170 | { | 172 | { |
171 | struct hlist_head *bucket; | 173 | struct hlist_head *bucket; |
172 | struct qstr *q; | 174 | struct qstr *q; |
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm, | |||
180 | dlm_lockres_get(res); | 182 | dlm_lockres_get(res); |
181 | 183 | ||
182 | hlist_add_head(&res->hash_node, bucket); | 184 | hlist_add_head(&res->hash_node, bucket); |
185 | |||
186 | mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len, | ||
187 | res->lockname.name); | ||
183 | } | 188 | } |
184 | 189 | ||
185 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | 190 | struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, |
@@ -539,17 +544,17 @@ again: | |||
539 | 544 | ||
540 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) | 545 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) |
541 | { | 546 | { |
542 | int node = -1; | 547 | int node = -1, num = 0; |
543 | 548 | ||
544 | assert_spin_locked(&dlm->spinlock); | 549 | assert_spin_locked(&dlm->spinlock); |
545 | 550 | ||
546 | printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name); | 551 | printk("( "); |
547 | |||
548 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | 552 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, |
549 | node + 1)) < O2NM_MAX_NODES) { | 553 | node + 1)) < O2NM_MAX_NODES) { |
550 | printk("%d ", node); | 554 | printk("%d ", node); |
555 | ++num; | ||
551 | } | 556 | } |
552 | printk("\n"); | 557 | printk(") %u nodes\n", num); |
553 | } | 558 | } |
554 | 559 | ||
555 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | 560 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, |
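The reworked __dlm_print_nodes() now finishes a line its caller begins: the join/leave paths print the KERN_NOTICE prefix without a trailing newline and the helper appends "( <nodes> ) <count> nodes\n". On kernels of this era consecutive printk() calls without a newline continue the same line (later kernels need KERN_CONT). A sketch of a hypothetical caller in the same file, assuming dlm->spinlock is held:

static void example_announce_join(struct dlm_ctxt *dlm, u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	/* open the line without a newline ... */
	printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", node,
	       dlm->name);
	/* ... and let the helper close it, yielding e.g.
	 *   o2dlm: Node 2 joins domain ABCDEF ( 0 1 2 ) 3 nodes
	 */
	__dlm_print_nodes(dlm);
}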
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | |||
566 | 571 | ||
567 | node = exit_msg->node_idx; | 572 | node = exit_msg->node_idx; |
568 | 573 | ||
569 | printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name); | ||
570 | |||
571 | spin_lock(&dlm->spinlock); | 574 | spin_lock(&dlm->spinlock); |
572 | clear_bit(node, dlm->domain_map); | 575 | clear_bit(node, dlm->domain_map); |
573 | clear_bit(node, dlm->exit_domain_map); | 576 | clear_bit(node, dlm->exit_domain_map); |
577 | printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name); | ||
574 | __dlm_print_nodes(dlm); | 578 | __dlm_print_nodes(dlm); |
575 | 579 | ||
576 | /* notify anything attached to the heartbeat events */ | 580 | /* notify anything attached to the heartbeat events */ |
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
755 | 759 | ||
756 | dlm_mark_domain_leaving(dlm); | 760 | dlm_mark_domain_leaving(dlm); |
757 | dlm_leave_domain(dlm); | 761 | dlm_leave_domain(dlm); |
762 | printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name); | ||
758 | dlm_force_free_mles(dlm); | 763 | dlm_force_free_mles(dlm); |
759 | dlm_complete_dlm_shutdown(dlm); | 764 | dlm_complete_dlm_shutdown(dlm); |
760 | } | 765 | } |
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, | |||
970 | clear_bit(assert->node_idx, dlm->exit_domain_map); | 975 | clear_bit(assert->node_idx, dlm->exit_domain_map); |
971 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 976 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); |
972 | 977 | ||
973 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", | 978 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", |
974 | assert->node_idx, dlm->name); | 979 | assert->node_idx, dlm->name); |
975 | __dlm_print_nodes(dlm); | 980 | __dlm_print_nodes(dlm); |
976 | 981 | ||
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1701 | bail: | 1706 | bail: |
1702 | spin_lock(&dlm->spinlock); | 1707 | spin_lock(&dlm->spinlock); |
1703 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 1708 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); |
1704 | if (!status) | 1709 | if (!status) { |
1710 | printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name); | ||
1705 | __dlm_print_nodes(dlm); | 1711 | __dlm_print_nodes(dlm); |
1712 | } | ||
1706 | spin_unlock(&dlm->spinlock); | 1713 | spin_unlock(&dlm->spinlock); |
1707 | 1714 | ||
1708 | if (ctxt) { | 1715 | if (ctxt) { |
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain, | |||
2131 | goto leave; | 2138 | goto leave; |
2132 | } | 2139 | } |
2133 | 2140 | ||
2134 | if (!o2hb_check_local_node_heartbeating()) { | ||
2135 | mlog(ML_ERROR, "the local node has not been configured, or is " | ||
2136 | "not heartbeating\n"); | ||
2137 | ret = -EPROTO; | ||
2138 | goto leave; | ||
2139 | } | ||
2140 | |||
2141 | mlog(0, "register called for domain \"%s\"\n", domain); | 2141 | mlog(0, "register called for domain \"%s\"\n", domain); |
2142 | 2142 | ||
2143 | retry: | 2143 | retry: |
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 8d39e0fd66f7..975810b98492 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | |||
183 | kick_thread = 1; | 183 | kick_thread = 1; |
184 | } | 184 | } |
185 | } | 185 | } |
186 | /* reduce the inflight count, this may result in the lockres | ||
187 | * being purged below during calc_usage */ | ||
188 | if (lock->ml.node == dlm->node_num) | ||
189 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
190 | 186 | ||
191 | spin_unlock(&res->spinlock); | 187 | spin_unlock(&res->spinlock); |
192 | wake_up(&res->wq); | 188 | wake_up(&res->wq); |
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, | |||
231 | lock->ml.type, res->lockname.len, | 227 | lock->ml.type, res->lockname.len, |
232 | res->lockname.name, flags); | 228 | res->lockname.name, flags); |
233 | 229 | ||
230 | /* | ||
231 | * Wait if resource is getting recovered, remastered, etc. | ||
232 | * If the resource was remastered and new owner is self, then exit. | ||
233 | */ | ||
234 | spin_lock(&res->spinlock); | 234 | spin_lock(&res->spinlock); |
235 | |||
236 | /* will exit this call with spinlock held */ | ||
237 | __dlm_wait_on_lockres(res); | 235 | __dlm_wait_on_lockres(res); |
236 | if (res->owner == dlm->node_num) { | ||
237 | spin_unlock(&res->spinlock); | ||
238 | return DLM_RECOVERING; | ||
239 | } | ||
238 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | 240 | res->state |= DLM_LOCK_RES_IN_PROGRESS; |
239 | 241 | ||
240 | /* add lock to local (secondary) queue */ | 242 | /* add lock to local (secondary) queue */ |
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | |||
319 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, | 321 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, |
320 | sizeof(create), res->owner, &status); | 322 | sizeof(create), res->owner, &status); |
321 | if (tmpret >= 0) { | 323 | if (tmpret >= 0) { |
322 | // successfully sent and received | 324 | ret = status; |
323 | ret = status; // this is already a dlm_status | ||
324 | if (ret == DLM_REJECTED) { | 325 | if (ret == DLM_REJECTED) { |
325 | mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " | 326 | mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer " |
326 | "no longer owned by %u. that node is coming back " | 327 | "owned by node %u. That node is coming back up " |
327 | "up currently.\n", dlm->name, create.namelen, | 328 | "currently.\n", dlm->name, create.namelen, |
328 | create.name, res->owner); | 329 | create.name, res->owner); |
329 | dlm_print_one_lock_resource(res); | 330 | dlm_print_one_lock_resource(res); |
330 | BUG(); | 331 | BUG(); |
331 | } | 332 | } |
332 | } else { | 333 | } else { |
333 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 334 | mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to " |
334 | "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, | 335 | "node %u\n", dlm->name, create.namelen, create.name, |
335 | res->owner); | 336 | tmpret, res->owner); |
336 | if (dlm_is_host_down(tmpret)) { | 337 | if (dlm_is_host_down(tmpret)) |
337 | ret = DLM_RECOVERING; | 338 | ret = DLM_RECOVERING; |
338 | mlog(0, "node %u died so returning DLM_RECOVERING " | 339 | else |
339 | "from lock message!\n", res->owner); | ||
340 | } else { | ||
341 | ret = dlm_err_to_dlm_status(tmpret); | 340 | ret = dlm_err_to_dlm_status(tmpret); |
342 | } | ||
343 | } | 341 | } |
344 | 342 | ||
345 | return ret; | 343 | return ret; |
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, | |||
440 | /* zero memory only if kernel-allocated */ | 438 | /* zero memory only if kernel-allocated */ |
441 | lksb = kzalloc(sizeof(*lksb), GFP_NOFS); | 439 | lksb = kzalloc(sizeof(*lksb), GFP_NOFS); |
442 | if (!lksb) { | 440 | if (!lksb) { |
443 | kfree(lock); | 441 | kmem_cache_free(dlm_lock_cache, lock); |
444 | return NULL; | 442 | return NULL; |
445 | } | 443 | } |
446 | kernel_allocated = 1; | 444 | kernel_allocated = 1; |
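The one-liner in dlm_new_lock() is a genuine bug fix: the lock structure comes from kmem_cache_alloc() on dlm_lock_cache, so the error path must hand it back with kmem_cache_free(); kfree() only pairs with k[mz]alloc(). The general rule, sketched with hypothetical names:

#include <linux/slab.h>

struct example_obj {
	void *extra;
};

static struct kmem_cache *example_cache;

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj;

	obj = kmem_cache_alloc(example_cache, GFP_NOFS);
	if (!obj)
		return NULL;

	obj->extra = kzalloc(64, GFP_NOFS);
	if (!obj->extra) {
		/* obj came from a cache: return it there, never kfree() */
		kmem_cache_free(example_cache, obj);
		return NULL;
	}
	return obj;
}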
@@ -718,18 +716,10 @@ retry_lock: | |||
718 | 716 | ||
719 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | 717 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || |
720 | status == DLM_FORWARD) { | 718 | status == DLM_FORWARD) { |
721 | mlog(0, "retrying lock with migration/" | ||
722 | "recovery/in progress\n"); | ||
723 | msleep(100); | 719 | msleep(100); |
724 | /* no waiting for dlm_reco_thread */ | ||
725 | if (recovery) { | 720 | if (recovery) { |
726 | if (status != DLM_RECOVERING) | 721 | if (status != DLM_RECOVERING) |
727 | goto retry_lock; | 722 | goto retry_lock; |
728 | |||
729 | mlog(0, "%s: got RECOVERING " | ||
730 | "for $RECOVERY lock, master " | ||
731 | "was %u\n", dlm->name, | ||
732 | res->owner); | ||
733 | /* wait to see the node go down, then | 723 | /* wait to see the node go down, then |
734 | * drop down and allow the lockres to | 724 | * drop down and allow the lockres to |
735 | * get cleaned up. need to remaster. */ | 725 | * get cleaned up. need to remaster. */ |
@@ -741,6 +731,14 @@ retry_lock: | |||
741 | } | 731 | } |
742 | } | 732 | } |
743 | 733 | ||
734 | /* Inflight taken in dlm_get_lock_resource() is dropped here */ | ||
735 | spin_lock(&res->spinlock); | ||
736 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
737 | spin_unlock(&res->spinlock); | ||
738 | |||
739 | dlm_lockres_calc_usage(dlm, res); | ||
740 | dlm_kick_thread(dlm, res); | ||
741 | |||
744 | if (status != DLM_NORMAL) { | 742 | if (status != DLM_NORMAL) { |
745 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | 743 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; |
746 | if (status != DLM_NOTQUEUED) | 744 | if (status != DLM_NOTQUEUED) |
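With this hunk the inflight reference taken when dlm_get_lock_resource() pins the resource is always dropped at the end of dlmlock(), and a usage recalculation follows so an otherwise idle resource becomes purgeable again. A hypothetical caller-side view of the discipline, assuming the dlm headers:

static void example_unpin_lockres(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res)
{
	/* res was pinned by dlm_get_lock_resource(); unpin it ... */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	/* ... then let the purge logic reconsider the resource */
	dlm_lockres_calc_usage(dlm, res);
	dlm_kick_thread(dlm, res);
}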
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 11eefb8c12e9..005261c333b0 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -631,39 +631,54 @@ error: | |||
631 | return NULL; | 631 | return NULL; |
632 | } | 632 | } |
633 | 633 | ||
634 | void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | 634 | void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, |
635 | struct dlm_lock_resource *res, | 635 | struct dlm_lock_resource *res, int bit) |
636 | int new_lockres, | ||
637 | const char *file, | ||
638 | int line) | ||
639 | { | 636 | { |
640 | if (!new_lockres) | 637 | assert_spin_locked(&res->spinlock); |
641 | assert_spin_locked(&res->spinlock); | 638 | |
639 | mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, | ||
640 | res->lockname.name, bit, __builtin_return_address(0)); | ||
641 | |||
642 | set_bit(bit, res->refmap); | ||
643 | } | ||
644 | |||
645 | void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, | ||
646 | struct dlm_lock_resource *res, int bit) | ||
647 | { | ||
648 | assert_spin_locked(&res->spinlock); | ||
649 | |||
650 | mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, | ||
651 | res->lockname.name, bit, __builtin_return_address(0)); | ||
652 | |||
653 | clear_bit(bit, res->refmap); | ||
654 | } | ||
655 | |||
656 | |||
657 | void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, | ||
658 | struct dlm_lock_resource *res) | ||
659 | { | ||
660 | assert_spin_locked(&res->spinlock); | ||
642 | 661 | ||
643 | if (!test_bit(dlm->node_num, res->refmap)) { | ||
644 | BUG_ON(res->inflight_locks != 0); | ||
645 | dlm_lockres_set_refmap_bit(dlm->node_num, res); | ||
646 | } | ||
647 | res->inflight_locks++; | 662 | res->inflight_locks++; |
648 | mlog(0, "%s:%.*s: inflight++: now %u\n", | 663 | |
649 | dlm->name, res->lockname.len, res->lockname.name, | 664 | mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, |
650 | res->inflight_locks); | 665 | res->lockname.len, res->lockname.name, res->inflight_locks, |
666 | __builtin_return_address(0)); | ||
651 | } | 667 | } |
652 | 668 | ||
653 | void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, | 669 | void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, |
654 | struct dlm_lock_resource *res, | 670 | struct dlm_lock_resource *res) |
655 | const char *file, | ||
656 | int line) | ||
657 | { | 671 | { |
658 | assert_spin_locked(&res->spinlock); | 672 | assert_spin_locked(&res->spinlock); |
659 | 673 | ||
660 | BUG_ON(res->inflight_locks == 0); | 674 | BUG_ON(res->inflight_locks == 0); |
675 | |||
661 | res->inflight_locks--; | 676 | res->inflight_locks--; |
662 | mlog(0, "%s:%.*s: inflight--: now %u\n", | 677 | |
663 | dlm->name, res->lockname.len, res->lockname.name, | 678 | mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, |
664 | res->inflight_locks); | 679 | res->lockname.len, res->lockname.name, res->inflight_locks, |
665 | if (res->inflight_locks == 0) | 680 | __builtin_return_address(0)); |
666 | dlm_lockres_clear_refmap_bit(dlm->node_num, res); | 681 | |
667 | wake_up(&res->wq); | 682 | wake_up(&res->wq); |
668 | } | 683 | } |
669 | 684 | ||
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | |||
697 | unsigned int hash; | 712 | unsigned int hash; |
698 | int tries = 0; | 713 | int tries = 0; |
699 | int bit, wait_on_recovery = 0; | 714 | int bit, wait_on_recovery = 0; |
700 | int drop_inflight_if_nonlocal = 0; | ||
701 | 715 | ||
702 | BUG_ON(!lockid); | 716 | BUG_ON(!lockid); |
703 | 717 | ||
@@ -709,36 +723,33 @@ lookup: | |||
709 | spin_lock(&dlm->spinlock); | 723 | spin_lock(&dlm->spinlock); |
710 | tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); | 724 | tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); |
711 | if (tmpres) { | 725 | if (tmpres) { |
712 | int dropping_ref = 0; | ||
713 | |||
714 | spin_unlock(&dlm->spinlock); | 726 | spin_unlock(&dlm->spinlock); |
715 | |||
716 | spin_lock(&tmpres->spinlock); | 727 | spin_lock(&tmpres->spinlock); |
717 | /* We wait for the other thread that is mastering the resource */ | 728 | /* Wait on the thread that is mastering the resource */ |
718 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | 729 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { |
719 | __dlm_wait_on_lockres(tmpres); | 730 | __dlm_wait_on_lockres(tmpres); |
720 | BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); | 731 | BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); |
732 | spin_unlock(&tmpres->spinlock); | ||
733 | dlm_lockres_put(tmpres); | ||
734 | tmpres = NULL; | ||
735 | goto lookup; | ||
721 | } | 736 | } |
722 | 737 | ||
723 | if (tmpres->owner == dlm->node_num) { | 738 | /* Wait on the resource purge to complete before continuing */ |
724 | BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); | 739 | if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { |
725 | dlm_lockres_grab_inflight_ref(dlm, tmpres); | 740 | BUG_ON(tmpres->owner == dlm->node_num); |
726 | } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) | 741 | __dlm_wait_on_lockres_flags(tmpres, |
727 | dropping_ref = 1; | 742 | DLM_LOCK_RES_DROPPING_REF); |
728 | spin_unlock(&tmpres->spinlock); | ||
729 | |||
730 | /* wait until done messaging the master, drop our ref to allow | ||
731 | * the lockres to be purged, start over. */ | ||
732 | if (dropping_ref) { | ||
733 | spin_lock(&tmpres->spinlock); | ||
734 | __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); | ||
735 | spin_unlock(&tmpres->spinlock); | 743 | spin_unlock(&tmpres->spinlock); |
736 | dlm_lockres_put(tmpres); | 744 | dlm_lockres_put(tmpres); |
737 | tmpres = NULL; | 745 | tmpres = NULL; |
738 | goto lookup; | 746 | goto lookup; |
739 | } | 747 | } |
740 | 748 | ||
741 | mlog(0, "found in hash!\n"); | 749 | /* Grab inflight ref to pin the resource */ |
750 | dlm_lockres_grab_inflight_ref(dlm, tmpres); | ||
751 | |||
752 | spin_unlock(&tmpres->spinlock); | ||
742 | if (res) | 753 | if (res) |
743 | dlm_lockres_put(res); | 754 | dlm_lockres_put(res); |
744 | res = tmpres; | 755 | res = tmpres; |
@@ -829,8 +840,8 @@ lookup: | |||
829 | * but they might own this lockres. wait on them. */ | 840 | * but they might own this lockres. wait on them. */ |
830 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | 841 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); |
831 | if (bit < O2NM_MAX_NODES) { | 842 | if (bit < O2NM_MAX_NODES) { |
832 | mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " | 843 | mlog(0, "%s: res %.*s, At least one node (%d) " |
833 | "recover before lock mastery can begin\n", | 844 | "to recover before lock mastery can begin\n", |
834 | dlm->name, namelen, (char *)lockid, bit); | 845 | dlm->name, namelen, (char *)lockid, bit); |
835 | wait_on_recovery = 1; | 846 | wait_on_recovery = 1; |
836 | } | 847 | } |
@@ -843,12 +854,11 @@ lookup: | |||
843 | 854 | ||
844 | /* finally add the lockres to its hash bucket */ | 855 | /* finally add the lockres to its hash bucket */ |
845 | __dlm_insert_lockres(dlm, res); | 856 | __dlm_insert_lockres(dlm, res); |
846 | /* since this lockres is new it does not require the spinlock */ | ||
847 | dlm_lockres_grab_inflight_ref_new(dlm, res); | ||
848 | 857 | ||
849 | /* if this node does not become the master make sure to drop | 858 | /* Grab inflight ref to pin the resource */ |
850 | * this inflight reference below */ | 859 | spin_lock(&res->spinlock); |
851 | drop_inflight_if_nonlocal = 1; | 860 | dlm_lockres_grab_inflight_ref(dlm, res); |
861 | spin_unlock(&res->spinlock); | ||
852 | 862 | ||
853 | /* get an extra ref on the mle in case this is a BLOCK | 863 | /* get an extra ref on the mle in case this is a BLOCK |
854 | * if so, the creator of the BLOCK may try to put the last | 864 | * if so, the creator of the BLOCK may try to put the last |
@@ -864,8 +874,8 @@ redo_request: | |||
864 | * dlm spinlock would be detectable by a change on the mle, | 874 | * dlm spinlock would be detectable by a change on the mle,
865 | * so we only need to clear out the recovery map once. */ | 875 | * so we only need to clear out the recovery map once. */ |
866 | if (dlm_is_recovery_lock(lockid, namelen)) { | 876 | if (dlm_is_recovery_lock(lockid, namelen)) { |
867 | mlog(ML_NOTICE, "%s: recovery map is not empty, but " | 877 | mlog(0, "%s: Recovery map is not empty, but must " |
868 | "must master $RECOVERY lock now\n", dlm->name); | 878 | "master $RECOVERY lock now\n", dlm->name); |
869 | if (!dlm_pre_master_reco_lockres(dlm, res)) | 879 | if (!dlm_pre_master_reco_lockres(dlm, res)) |
870 | wait_on_recovery = 0; | 880 | wait_on_recovery = 0; |
871 | else { | 881 | else { |
@@ -883,8 +893,8 @@ redo_request: | |||
883 | spin_lock(&dlm->spinlock); | 893 | spin_lock(&dlm->spinlock); |
884 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | 894 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); |
885 | if (bit < O2NM_MAX_NODES) { | 895 | if (bit < O2NM_MAX_NODES) { |
886 | mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " | 896 | mlog(0, "%s: res %.*s, At least one node (%d) " |
887 | "recover before lock mastery can begin\n", | 897 | "to recover before lock mastery can begin\n", |
888 | dlm->name, namelen, (char *)lockid, bit); | 898 | dlm->name, namelen, (char *)lockid, bit); |
889 | wait_on_recovery = 1; | 899 | wait_on_recovery = 1; |
890 | } else | 900 | } else |
@@ -913,8 +923,8 @@ redo_request: | |||
913 | * yet, keep going until it does. this is how the | 923 | * yet, keep going until it does. this is how the |
914 | * master will know that asserts are needed back to | 924 | * master will know that asserts are needed back to |
915 | * the lower nodes. */ | 925 | * the lower nodes. */ |
916 | mlog(0, "%s:%.*s: requests only up to %u but master " | 926 | mlog(0, "%s: res %.*s, Requests only up to %u but " |
917 | "is %u, keep going\n", dlm->name, namelen, | 927 | "master is %u, keep going\n", dlm->name, namelen, |
918 | lockid, nodenum, mle->master); | 928 | lockid, nodenum, mle->master); |
919 | } | 929 | } |
920 | } | 930 | } |
@@ -924,13 +934,12 @@ wait: | |||
924 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); | 934 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); |
925 | if (ret < 0) { | 935 | if (ret < 0) { |
926 | wait_on_recovery = 1; | 936 | wait_on_recovery = 1; |
927 | mlog(0, "%s:%.*s: node map changed, redo the " | 937 | mlog(0, "%s: res %.*s, Node map changed, redo the master " |
928 | "master request now, blocked=%d\n", | 938 | "request now, blocked=%d\n", dlm->name, res->lockname.len, |
929 | dlm->name, res->lockname.len, | ||
930 | res->lockname.name, blocked); | 939 | res->lockname.name, blocked); |
931 | if (++tries > 20) { | 940 | if (++tries > 20) { |
932 | mlog(ML_ERROR, "%s:%.*s: spinning on " | 941 | mlog(ML_ERROR, "%s: res %.*s, Spinning on " |
933 | "dlm_wait_for_lock_mastery, blocked=%d\n", | 942 | "dlm_wait_for_lock_mastery, blocked = %d\n", |
934 | dlm->name, res->lockname.len, | 943 | dlm->name, res->lockname.len, |
935 | res->lockname.name, blocked); | 944 | res->lockname.name, blocked); |
936 | dlm_print_one_lock_resource(res); | 945 | dlm_print_one_lock_resource(res); |
@@ -940,7 +949,8 @@ wait: | |||
940 | goto redo_request; | 949 | goto redo_request; |
941 | } | 950 | } |
942 | 951 | ||
943 | mlog(0, "lockres mastered by %u\n", res->owner); | 952 | mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, |
953 | res->lockname.name, res->owner); | ||
944 | /* make sure we never continue without this */ | 954 | /* make sure we never continue without this */ |
945 | BUG_ON(res->owner == O2NM_MAX_NODES); | 955 | BUG_ON(res->owner == O2NM_MAX_NODES); |
946 | 956 | ||
@@ -952,8 +962,6 @@ wait: | |||
952 | 962 | ||
953 | wake_waiters: | 963 | wake_waiters: |
954 | spin_lock(&res->spinlock); | 964 | spin_lock(&res->spinlock); |
955 | if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) | ||
956 | dlm_lockres_drop_inflight_ref(dlm, res); | ||
957 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | 965 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; |
958 | spin_unlock(&res->spinlock); | 966 | spin_unlock(&res->spinlock); |
959 | wake_up(&res->wq); | 967 | wake_up(&res->wq); |
@@ -1426,9 +1434,7 @@ way_up_top: | |||
1426 | } | 1434 | } |
1427 | 1435 | ||
1428 | if (res->owner == dlm->node_num) { | 1436 | if (res->owner == dlm->node_num) { |
1429 | mlog(0, "%s:%.*s: setting bit %u in refmap\n", | 1437 | dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); |
1430 | dlm->name, namelen, name, request->node_idx); | ||
1431 | dlm_lockres_set_refmap_bit(request->node_idx, res); | ||
1432 | spin_unlock(&res->spinlock); | 1438 | spin_unlock(&res->spinlock); |
1433 | response = DLM_MASTER_RESP_YES; | 1439 | response = DLM_MASTER_RESP_YES; |
1434 | if (mle) | 1440 | if (mle) |
@@ -1493,10 +1499,8 @@ way_up_top: | |||
1493 | * go back and clean the mles on any | 1499 | * go back and clean the mles on any |
1494 | * other nodes */ | 1500 | * other nodes */ |
1495 | dispatch_assert = 1; | 1501 | dispatch_assert = 1; |
1496 | dlm_lockres_set_refmap_bit(request->node_idx, res); | 1502 | dlm_lockres_set_refmap_bit(dlm, res, |
1497 | mlog(0, "%s:%.*s: setting bit %u in refmap\n", | 1503 | request->node_idx); |
1498 | dlm->name, namelen, name, | ||
1499 | request->node_idx); | ||
1500 | } else | 1504 | } else |
1501 | response = DLM_MASTER_RESP_NO; | 1505 | response = DLM_MASTER_RESP_NO; |
1502 | } else { | 1506 | } else { |
@@ -1702,7 +1706,7 @@ again: | |||
1702 | "lockres, set the bit in the refmap\n", | 1706 | "lockres, set the bit in the refmap\n", |
1703 | namelen, lockname, to); | 1707 | namelen, lockname, to); |
1704 | spin_lock(&res->spinlock); | 1708 | spin_lock(&res->spinlock); |
1705 | dlm_lockres_set_refmap_bit(to, res); | 1709 | dlm_lockres_set_refmap_bit(dlm, res, to); |
1706 | spin_unlock(&res->spinlock); | 1710 | spin_unlock(&res->spinlock); |
1707 | } | 1711 | } |
1708 | } | 1712 | } |
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
2187 | namelen = res->lockname.len; | 2191 | namelen = res->lockname.len; |
2188 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | 2192 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); |
2189 | 2193 | ||
2190 | mlog(0, "%s:%.*s: sending deref to %d\n", | ||
2191 | dlm->name, namelen, lockname, res->owner); | ||
2192 | memset(&deref, 0, sizeof(deref)); | 2194 | memset(&deref, 0, sizeof(deref)); |
2193 | deref.node_idx = dlm->node_num; | 2195 | deref.node_idx = dlm->node_num; |
2194 | deref.namelen = namelen; | 2196 | deref.namelen = namelen; |
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
2197 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, | 2199 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, |
2198 | &deref, sizeof(deref), res->owner, &r); | 2200 | &deref, sizeof(deref), res->owner, &r); |
2199 | if (ret < 0) | 2201 | if (ret < 0) |
2200 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 2202 | mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", |
2201 | "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key, | 2203 | dlm->name, namelen, lockname, ret, res->owner); |
2202 | res->owner); | ||
2203 | else if (r < 0) { | 2204 | else if (r < 0) { |
2204 | /* BAD. other node says I did not have a ref. */ | 2205 | /* BAD. other node says I did not have a ref. */ |
2205 | mlog(ML_ERROR,"while dropping ref on %s:%.*s " | 2206 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", |
2206 | "(master=%u) got %d.\n", dlm->name, namelen, | 2207 | dlm->name, namelen, lockname, res->owner, r); |
2207 | lockname, res->owner, r); | ||
2208 | dlm_print_one_lock_resource(res); | 2208 | dlm_print_one_lock_resource(res); |
2209 | BUG(); | 2209 | BUG(); |
2210 | } | 2210 | } |
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, | |||
2260 | else { | 2260 | else { |
2261 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | 2261 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); |
2262 | if (test_bit(node, res->refmap)) { | 2262 | if (test_bit(node, res->refmap)) { |
2263 | dlm_lockres_clear_refmap_bit(node, res); | 2263 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
2264 | cleared = 1; | 2264 | cleared = 1; |
2265 | } | 2265 | } |
2266 | } | 2266 | } |
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) | |||
2320 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | 2320 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); |
2321 | if (test_bit(node, res->refmap)) { | 2321 | if (test_bit(node, res->refmap)) { |
2322 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); | 2322 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); |
2323 | dlm_lockres_clear_refmap_bit(node, res); | 2323 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
2324 | cleared = 1; | 2324 | cleared = 1; |
2325 | } | 2325 | } |
2326 | spin_unlock(&res->spinlock); | 2326 | spin_unlock(&res->spinlock); |
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |||
2802 | BUG_ON(!list_empty(&lock->bast_list)); | 2802 | BUG_ON(!list_empty(&lock->bast_list)); |
2803 | BUG_ON(lock->ast_pending); | 2803 | BUG_ON(lock->ast_pending); |
2804 | BUG_ON(lock->bast_pending); | 2804 | BUG_ON(lock->bast_pending); |
2805 | dlm_lockres_clear_refmap_bit(lock->ml.node, res); | 2805 | dlm_lockres_clear_refmap_bit(dlm, res, |
2806 | lock->ml.node); | ||
2806 | list_del_init(&lock->list); | 2807 | list_del_init(&lock->list); |
2807 | dlm_lock_put(lock); | 2808 | dlm_lock_put(lock); |
2808 | /* In a normal unlock, we would have added a | 2809 | /* In a normal unlock, we would have added a |
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |||
2823 | mlog(0, "%s:%.*s: node %u had a ref to this " | 2824 | mlog(0, "%s:%.*s: node %u had a ref to this " |
2824 | "migrating lockres, clearing\n", dlm->name, | 2825 | "migrating lockres, clearing\n", dlm->name, |
2825 | res->lockname.len, res->lockname.name, bit); | 2826 | res->lockname.len, res->lockname.name, bit); |
2826 | dlm_lockres_clear_refmap_bit(bit, res); | 2827 | dlm_lockres_clear_refmap_bit(dlm, res, bit); |
2827 | } | 2828 | } |
2828 | bit++; | 2829 | bit++; |
2829 | } | 2830 | } |
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |||
2916 | &migrate, sizeof(migrate), nodenum, | 2917 | &migrate, sizeof(migrate), nodenum, |
2917 | &status); | 2918 | &status); |
2918 | if (ret < 0) { | 2919 | if (ret < 0) { |
2919 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 2920 | mlog(ML_ERROR, "%s: res %.*s, Error %d send " |
2920 | "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG, | 2921 | "MIGRATE_REQUEST to node %u\n", dlm->name, |
2921 | dlm->key, nodenum); | 2922 | migrate.namelen, migrate.name, ret, nodenum); |
2922 | if (!dlm_is_host_down(ret)) { | 2923 | if (!dlm_is_host_down(ret)) { |
2923 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); | 2924 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); |
2924 | BUG(); | 2925 | BUG(); |
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |||
2937 | dlm->name, res->lockname.len, res->lockname.name, | 2938 | dlm->name, res->lockname.len, res->lockname.name, |
2938 | nodenum); | 2939 | nodenum); |
2939 | spin_lock(&res->spinlock); | 2940 | spin_lock(&res->spinlock); |
2940 | dlm_lockres_set_refmap_bit(nodenum, res); | 2941 | dlm_lockres_set_refmap_bit(dlm, res, nodenum); |
2941 | spin_unlock(&res->spinlock); | 2942 | spin_unlock(&res->spinlock); |
2942 | } | 2943 | } |
2943 | } | 2944 | } |
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
3271 | * mastery reference here since old_master will briefly have | 3272 | * mastery reference here since old_master will briefly have |
3272 | * a reference after the migration completes */ | 3273 | * a reference after the migration completes */ |
3273 | spin_lock(&res->spinlock); | 3274 | spin_lock(&res->spinlock); |
3274 | dlm_lockres_set_refmap_bit(old_master, res); | 3275 | dlm_lockres_set_refmap_bit(dlm, res, old_master); |
3275 | spin_unlock(&res->spinlock); | 3276 | spin_unlock(&res->spinlock); |
3276 | 3277 | ||
3277 | mlog(0, "now time to do a migrate request to other nodes\n"); | 3278 | mlog(0, "now time to do a migrate request to other nodes\n"); |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 7efab6d28a21..01ebfd0bdad7 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) | |||
362 | } | 362 | } |
363 | 363 | ||
364 | 364 | ||
365 | int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) | 365 | void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) |
366 | { | 366 | { |
367 | if (timeout) { | 367 | if (dlm_is_node_dead(dlm, node)) |
368 | mlog(ML_NOTICE, "%s: waiting %dms for notification of " | 368 | return; |
369 | "death of node %u\n", dlm->name, timeout, node); | 369 | |
370 | printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " | ||
371 | "domain %s\n", node, dlm->name); | ||
372 | |||
373 | if (timeout) | ||
370 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 374 | wait_event_timeout(dlm->dlm_reco_thread_wq, |
371 | dlm_is_node_dead(dlm, node), | 375 | dlm_is_node_dead(dlm, node), |
372 | msecs_to_jiffies(timeout)); | 376 | msecs_to_jiffies(timeout)); |
373 | } else { | 377 | else |
374 | mlog(ML_NOTICE, "%s: waiting indefinitely for notification " | ||
375 | "of death of node %u\n", dlm->name, node); | ||
376 | wait_event(dlm->dlm_reco_thread_wq, | 378 | wait_event(dlm->dlm_reco_thread_wq, |
377 | dlm_is_node_dead(dlm, node)); | 379 | dlm_is_node_dead(dlm, node)); |
378 | } | ||
379 | /* for now, return 0 */ | ||
380 | return 0; | ||
381 | } | 380 | } |
382 | 381 | ||
383 | int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) | 382 | void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) |
384 | { | 383 | { |
385 | if (timeout) { | 384 | if (dlm_is_node_recovered(dlm, node)) |
386 | mlog(0, "%s: waiting %dms for notification of " | 385 | return; |
387 | "recovery of node %u\n", dlm->name, timeout, node); | 386 | |
387 | printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " | ||
388 | "domain %s\n", node, dlm->name); | ||
389 | |||
390 | if (timeout) | ||
388 | wait_event_timeout(dlm->dlm_reco_thread_wq, | 391 | wait_event_timeout(dlm->dlm_reco_thread_wq, |
389 | dlm_is_node_recovered(dlm, node), | 392 | dlm_is_node_recovered(dlm, node), |
390 | msecs_to_jiffies(timeout)); | 393 | msecs_to_jiffies(timeout)); |
391 | } else { | 394 | else |
392 | mlog(0, "%s: waiting indefinitely for notification " | ||
393 | "of recovery of node %u\n", dlm->name, node); | ||
394 | wait_event(dlm->dlm_reco_thread_wq, | 395 | wait_event(dlm->dlm_reco_thread_wq, |
395 | dlm_is_node_recovered(dlm, node)); | 396 | dlm_is_node_recovered(dlm, node)); |
396 | } | ||
397 | /* for now, return 0 */ | ||
398 | return 0; | ||
399 | } | 397 | } |
400 | 398 | ||
401 | /* callers of the top-level api calls (dlmlock/dlmunlock) should | 399 | /* callers of the top-level api calls (dlmlock/dlmunlock) should |
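Both wait helpers now share one shape: return immediately if the condition already holds, announce the wait once, then block, bounded by wait_event_timeout() when a timeout is given and unbounded otherwise; since the result was never meaningful, the return type becomes void. The distilled pattern, with illustrative names:

#include <linux/jiffies.h>
#include <linux/wait.h>

static void example_wait(wait_queue_head_t *wq, int (*cond)(void *),
			 void *arg, int timeout_ms)
{
	if (cond(arg))
		return;			/* already satisfied */

	if (timeout_ms)
		wait_event_timeout(*wq, cond(arg),
				   msecs_to_jiffies(timeout_ms));
	else
		wait_event(*wq, cond(arg));
}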
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm) | |||
430 | { | 428 | { |
431 | spin_lock(&dlm->spinlock); | 429 | spin_lock(&dlm->spinlock); |
432 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | 430 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); |
431 | printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n", | ||
432 | dlm->name, dlm->reco.dead_node); | ||
433 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; | 433 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; |
434 | spin_unlock(&dlm->spinlock); | 434 | spin_unlock(&dlm->spinlock); |
435 | } | 435 | } |
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm) | |||
440 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); | 440 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); |
441 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; | 441 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; |
442 | spin_unlock(&dlm->spinlock); | 442 | spin_unlock(&dlm->spinlock); |
443 | printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name); | ||
443 | wake_up(&dlm->reco.event); | 444 | wake_up(&dlm->reco.event); |
444 | } | 445 | } |
445 | 446 | ||
447 | static void dlm_print_recovery_master(struct dlm_ctxt *dlm) | ||
448 | { | ||
449 | printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the " | ||
450 | "dead node %u in domain %s\n", dlm->reco.new_master, | ||
451 | (dlm->node_num == dlm->reco.new_master ? "me" : "he"), | ||
452 | dlm->reco.dead_node, dlm->name); | ||
453 | } | ||
454 | |||
446 | static int dlm_do_recovery(struct dlm_ctxt *dlm) | 455 | static int dlm_do_recovery(struct dlm_ctxt *dlm) |
447 | { | 456 | { |
448 | int status = 0; | 457 | int status = 0; |
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) | |||
505 | } | 514 | } |
506 | mlog(0, "another node will master this recovery session.\n"); | 515 | mlog(0, "another node will master this recovery session.\n"); |
507 | } | 516 | } |
508 | mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", | 517 | |
509 | dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, | 518 | dlm_print_recovery_master(dlm); |
510 | dlm->node_num, dlm->reco.dead_node); | ||
511 | 519 | ||
512 | /* it is safe to start everything back up here | 520 | /* it is safe to start everything back up here |
513 | * because all of the dead node's lock resources | 521 | * because all of the dead node's lock resources |
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) | |||
518 | return 0; | 526 | return 0; |
519 | 527 | ||
520 | master_here: | 528 | master_here: |
521 | mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " | 529 | dlm_print_recovery_master(dlm); |
522 | "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), | ||
523 | dlm->node_num, dlm->reco.dead_node, dlm->name); | ||
524 | 530 | ||
525 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); | 531 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); |
526 | if (status < 0) { | 532 | if (status < 0) { |
527 | /* we should never hit this anymore */ | 533 | /* we should never hit this anymore */ |
528 | mlog(ML_ERROR, "error %d remastering locks for node %u, " | 534 | mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, " |
529 | "retrying.\n", status, dlm->reco.dead_node); | 535 | "retrying.\n", dlm->name, status, dlm->reco.dead_node); |
530 | /* yield a bit to allow any final network messages | 536 | /* yield a bit to allow any final network messages |
531 | * to get handled on remaining nodes */ | 537 | * to get handled on remaining nodes */ |
532 | msleep(100); | 538 | msleep(100); |
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
567 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); | 573 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); |
568 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; | 574 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; |
569 | 575 | ||
570 | mlog(0, "requesting lock info from node %u\n", | 576 | mlog(0, "%s: Requesting lock info from node %u\n", dlm->name, |
571 | ndata->node_num); | 577 | ndata->node_num); |
572 | 578 | ||
573 | if (ndata->node_num == dlm->node_num) { | 579 | if (ndata->node_num == dlm->node_num) { |
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | |||
640 | spin_unlock(&dlm_reco_state_lock); | 646 | spin_unlock(&dlm_reco_state_lock); |
641 | } | 647 | } |
642 | 648 | ||
643 | mlog(0, "done requesting all lock info\n"); | 649 | mlog(0, "%s: Done requesting all lock info\n", dlm->name); |
644 | 650 | ||
645 | /* nodes should be sending reco data now | 651 | /* nodes should be sending reco data now |
646 | * just need to wait */ | 652 | * just need to wait */ |
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
802 | 808 | ||
803 | /* negative status is handled by caller */ | 809 | /* negative status is handled by caller */ |
804 | if (ret < 0) | 810 | if (ret < 0) |
805 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 811 | mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " |
806 | "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, | 812 | "to recover dead node %u\n", dlm->name, ret, |
807 | dlm->key, request_from); | 813 | request_from, dead_node); |
808 | |||
809 | // return from here, then | 814 | // return from here, then |
810 | // sleep until all received or error | 815 | // sleep until all received or error |
811 | return ret; | 816 | return ret; |
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) | |||
956 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, | 961 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, |
957 | sizeof(done_msg), send_to, &tmpret); | 962 | sizeof(done_msg), send_to, &tmpret); |
958 | if (ret < 0) { | 963 | if (ret < 0) { |
959 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 964 | mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u " |
960 | "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, | 965 | "to recover dead node %u\n", dlm->name, ret, send_to, |
961 | dlm->key, send_to); | 966 | dead_node); |
962 | if (!dlm_is_host_down(ret)) { | 967 | if (!dlm_is_host_down(ret)) { |
963 | BUG(); | 968 | BUG(); |
964 | } | 969 | } |
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | |||
1127 | if (ret < 0) { | 1132 | if (ret < 0) { |
1128 | /* XXX: negative status is not handled. | 1133 | /* XXX: negative status is not handled. |
1129 | * this will end up killing this node. */ | 1134 | * this will end up killing this node. */ |
1130 | mlog(ML_ERROR, "Error %d when sending message %u (key " | 1135 | mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to " |
1131 | "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, | 1136 | "node %u (%s)\n", dlm->name, mres->lockname_len, |
1132 | dlm->key, send_to); | 1137 | mres->lockname, ret, send_to, |
1138 | (orig_flags & DLM_MRES_MIGRATION ? | ||
1139 | "migration" : "recovery")); | ||
1133 | } else { | 1140 | } else { |
1134 | /* might get an -ENOMEM back here */ | 1141 | /* might get an -ENOMEM back here */ |
1135 | ret = status; | 1142 | ret = status; |
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | |||
1767 | dlm->name, mres->lockname_len, mres->lockname, | 1774 | dlm->name, mres->lockname_len, mres->lockname, |
1768 | from); | 1775 | from); |
1769 | spin_lock(&res->spinlock); | 1776 | spin_lock(&res->spinlock); |
1770 | dlm_lockres_set_refmap_bit(from, res); | 1777 | dlm_lockres_set_refmap_bit(dlm, res, from); |
1771 | spin_unlock(&res->spinlock); | 1778 | spin_unlock(&res->spinlock); |
1772 | added++; | 1779 | added++; |
1773 | break; | 1780 | break; |
@@ -1965,7 +1972,7 @@ skip_lvb: | |||
1965 | mlog(0, "%s:%.*s: added lock for node %u, " | 1972 | mlog(0, "%s:%.*s: added lock for node %u, " |
1966 | "setting refmap bit\n", dlm->name, | 1973 | "setting refmap bit\n", dlm->name, |
1967 | res->lockname.len, res->lockname.name, ml->node); | 1974 | res->lockname.len, res->lockname.name, ml->node); |
1968 | dlm_lockres_set_refmap_bit(ml->node, res); | 1975 | dlm_lockres_set_refmap_bit(dlm, res, ml->node); |
1969 | added++; | 1976 | added++; |
1970 | } | 1977 | } |
1971 | spin_unlock(&res->spinlock); | 1978 | spin_unlock(&res->spinlock); |
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |||
2084 | 2091 | ||
2085 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { | 2092 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { |
2086 | if (res->owner == dead_node) { | 2093 | if (res->owner == dead_node) { |
2094 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", | ||
2095 | dlm->name, res->lockname.len, res->lockname.name, | ||
2096 | res->owner, new_master); | ||
2087 | list_del_init(&res->recovering); | 2097 | list_del_init(&res->recovering); |
2088 | spin_lock(&res->spinlock); | 2098 | spin_lock(&res->spinlock); |
2089 | /* new_master has our reference from | 2099 | /* new_master has our reference from |
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |||
2105 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 2115 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
2106 | bucket = dlm_lockres_hash(dlm, i); | 2116 | bucket = dlm_lockres_hash(dlm, i); |
2107 | hlist_for_each_entry(res, hash_iter, bucket, hash_node) { | 2117 | hlist_for_each_entry(res, hash_iter, bucket, hash_node) { |
2108 | if (res->state & DLM_LOCK_RES_RECOVERING) { | 2118 | if (!(res->state & DLM_LOCK_RES_RECOVERING)) |
2109 | if (res->owner == dead_node) { | 2119 | continue; |
2110 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
2111 | "was not on recovering list, but " | ||
2112 | "clearing state anyway\n", | ||
2113 | dlm->node_num, res->lockname.len, | ||
2114 | res->lockname.name, new_master); | ||
2115 | } else if (res->owner == dlm->node_num) { | ||
2116 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
2117 | "was not on recovering list, " | ||
2118 | "owner is THIS node, clearing\n", | ||
2119 | dlm->node_num, res->lockname.len, | ||
2120 | res->lockname.name, new_master); | ||
2121 | } else | ||
2122 | continue; | ||
2123 | 2120 | ||
2124 | if (!list_empty(&res->recovering)) { | 2121 | if (res->owner != dead_node && |
2125 | mlog(0, "%s:%.*s: lockres was " | 2122 | res->owner != dlm->node_num) |
2126 | "marked RECOVERING, owner=%u\n", | 2123 | continue; |
2127 | dlm->name, res->lockname.len, | 2124 | |
2128 | res->lockname.name, res->owner); | 2125 | if (!list_empty(&res->recovering)) { |
2129 | list_del_init(&res->recovering); | 2126 | list_del_init(&res->recovering); |
2130 | dlm_lockres_put(res); | 2127 | dlm_lockres_put(res); |
2131 | } | ||
2132 | spin_lock(&res->spinlock); | ||
2133 | /* new_master has our reference from | ||
2134 | * the lock state sent during recovery */ | ||
2135 | dlm_change_lockres_owner(dlm, res, new_master); | ||
2136 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
2137 | if (__dlm_lockres_has_locks(res)) | ||
2138 | __dlm_dirty_lockres(dlm, res); | ||
2139 | spin_unlock(&res->spinlock); | ||
2140 | wake_up(&res->wq); | ||
2141 | } | 2128 | } |
2129 | |||
2130 | /* new_master has our reference from | ||
2131 | * the lock state sent during recovery */ | ||
2132 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", | ||
2133 | dlm->name, res->lockname.len, res->lockname.name, | ||
2134 | res->owner, new_master); | ||
2135 | spin_lock(&res->spinlock); | ||
2136 | dlm_change_lockres_owner(dlm, res, new_master); | ||
2137 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
2138 | if (__dlm_lockres_has_locks(res)) | ||
2139 | __dlm_dirty_lockres(dlm, res); | ||
2140 | spin_unlock(&res->spinlock); | ||
2141 | wake_up(&res->wq); | ||
2142 | } | 2142 | } |
2143 | } | 2143 | } |
2144 | } | 2144 | } |
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | |||
2252 | res->lockname.len, res->lockname.name, freed, dead_node); | 2252 | res->lockname.len, res->lockname.name, freed, dead_node); |
2253 | __dlm_print_one_lock_resource(res); | 2253 | __dlm_print_one_lock_resource(res); |
2254 | } | 2254 | } |
2255 | dlm_lockres_clear_refmap_bit(dead_node, res); | 2255 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
2256 | } else if (test_bit(dead_node, res->refmap)) { | 2256 | } else if (test_bit(dead_node, res->refmap)) { |
2257 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " | 2257 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " |
2258 | "no locks and had not purged before dying\n", dlm->name, | 2258 | "no locks and had not purged before dying\n", dlm->name, |
2259 | res->lockname.len, res->lockname.name, dead_node); | 2259 | res->lockname.len, res->lockname.name, dead_node); |
2260 | dlm_lockres_clear_refmap_bit(dead_node, res); | 2260 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
2261 | } | 2261 | } |
2262 | 2262 | ||
2263 | /* do not kick thread yet */ | 2263 | /* do not kick thread yet */ |
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2324 | dlm_revalidate_lvb(dlm, res, dead_node); | 2324 | dlm_revalidate_lvb(dlm, res, dead_node); |
2325 | if (res->owner == dead_node) { | 2325 | if (res->owner == dead_node) { |
2326 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { | 2326 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { |
2327 | mlog(ML_NOTICE, "Ignore %.*s for " | 2327 | mlog(ML_NOTICE, "%s: res %.*s, Skip " |
2328 | "recovery as it is being freed\n", | 2328 | "recovery as it is being freed\n", |
2329 | res->lockname.len, | 2329 | dlm->name, res->lockname.len, |
2330 | res->lockname.name); | 2330 | res->lockname.name); |
2331 | } else | 2331 | } else |
2332 | dlm_move_lockres_to_recovery_list(dlm, | 2332 | dlm_move_lockres_to_recovery_list(dlm, |
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 1d6d1d22c471..e73c833fc2a1 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res) | |||
94 | { | 94 | { |
95 | int bit; | 95 | int bit; |
96 | 96 | ||
97 | assert_spin_locked(&res->spinlock); | ||
98 | |||
97 | if (__dlm_lockres_has_locks(res)) | 99 | if (__dlm_lockres_has_locks(res)) |
98 | return 0; | 100 | return 0; |
99 | 101 | ||
102 | /* Locks are in the process of being created */ | ||
103 | if (res->inflight_locks) | ||
104 | return 0; | ||
105 | |||
100 | if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) | 106 | if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) |
101 | return 0; | 107 | return 0; |
102 | 108 | ||
103 | if (res->state & DLM_LOCK_RES_RECOVERING) | 109 | if (res->state & DLM_LOCK_RES_RECOVERING) |
104 | return 0; | 110 | return 0; |
105 | 111 | ||
112 | /* Another node has this resource with this node as the master */ | ||
106 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | 113 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); |
107 | if (bit < O2NM_MAX_NODES) | 114 | if (bit < O2NM_MAX_NODES) |
108 | return 0; | 115 | return 0; |
109 | 116 | ||
110 | /* | ||
111 | * since the bit for dlm->node_num is not set, inflight_locks better | ||
112 | * be zero | ||
113 | */ | ||
114 | BUG_ON(res->inflight_locks != 0); | ||
115 | return 1; | 117 | return 1; |
116 | } | 118 | } |
117 | 119 | ||
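After this rework, __dlm_lockres_unused() is a flat chain of guard clauses, and an in-flight lock now simply marks the resource as busy instead of tripping the old BUG_ON once the refmap is clear. A standalone sketch of the resulting predicate, with stand-in fields for the real lists and state flags:

	#include <stdbool.h>

	struct lockres {
		int nr_locks;		/* stands in for the three lock lists */
		int inflight_locks;	/* locks still being created */
		bool dirty;		/* on the dirty list or RES_DIRTY */
		bool recovering;	/* DLM_LOCK_RES_RECOVERING */
		bool refmap_empty;	/* no other node references us */
	};

	/* caller must hold the resource spinlock, as the new assert enforces */
	static bool lockres_unused(const struct lockres *res)
	{
		if (res->nr_locks)
			return false;
		if (res->inflight_locks)	/* creation in progress */
			return false;
		if (res->dirty)
			return false;
		if (res->recovering)
			return false;
		if (!res->refmap_empty)		/* remote ref still held */
			return false;
		return true;
	}

	int main(void)
	{
		struct lockres res = { .inflight_locks = 1, .refmap_empty = true };
		return lockres_unused(&res);	/* 0: the inflight lock keeps it alive */
	}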
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
185 | /* clear our bit from the master's refmap, ignore errors */ | 187 | /* clear our bit from the master's refmap, ignore errors */ |
186 | ret = dlm_drop_lockres_ref(dlm, res); | 188 | ret = dlm_drop_lockres_ref(dlm, res); |
187 | if (ret < 0) { | 189 | if (ret < 0) { |
188 | mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name, | ||
189 | res->lockname.len, res->lockname.name, ret); | ||
190 | if (!dlm_is_host_down(ret)) | 190 | if (!dlm_is_host_down(ret)) |
191 | BUG(); | 191 | BUG(); |
192 | } | 192 | } |
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
209 | BUG(); | 209 | BUG(); |
210 | } | 210 | } |
211 | 211 | ||
212 | __dlm_unhash_lockres(res); | 212 | __dlm_unhash_lockres(dlm, res); |
213 | 213 | ||
214 | /* lockres is not in the hash now. drop the flag and wake up | 214 | /* lockres is not in the hash now. drop the flag and wake up |
215 | * any processes waiting in dlm_get_lock_resource. */ | 215 | * any processes waiting in dlm_get_lock_resource. */ |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index e1ed5e502ff2..81a4cd22f80b 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode) | |||
1692 | mlog(0, "inode %llu take PRMODE open lock\n", | 1692 | mlog(0, "inode %llu take PRMODE open lock\n", |
1693 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 1693 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
1694 | 1694 | ||
1695 | if (ocfs2_mount_local(osb)) | 1695 | if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) |
1696 | goto out; | 1696 | goto out; |
1697 | 1697 | ||
1698 | lockres = &OCFS2_I(inode)->ip_open_lockres; | 1698 | lockres = &OCFS2_I(inode)->ip_open_lockres; |
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write) | |||
1718 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 1718 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
1719 | write ? "EXMODE" : "PRMODE"); | 1719 | write ? "EXMODE" : "PRMODE"); |
1720 | 1720 | ||
1721 | if (ocfs2_is_hard_readonly(osb)) { | ||
1722 | if (write) | ||
1723 | status = -EROFS; | ||
1724 | goto out; | ||
1725 | } | ||
1726 | |||
1721 | if (ocfs2_mount_local(osb)) | 1727 | if (ocfs2_mount_local(osb)) |
1722 | goto out; | 1728 | goto out; |
1723 | 1729 | ||
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, | |||
2298 | if (ocfs2_is_hard_readonly(osb)) { | 2304 | if (ocfs2_is_hard_readonly(osb)) { |
2299 | if (ex) | 2305 | if (ex) |
2300 | status = -EROFS; | 2306 | status = -EROFS; |
2301 | goto bail; | 2307 | goto getbh; |
2302 | } | 2308 | } |
2303 | 2309 | ||
2304 | if (ocfs2_mount_local(osb)) | 2310 | if (ocfs2_mount_local(osb)) |
@@ -2356,7 +2362,7 @@ local: | |||
2356 | mlog_errno(status); | 2362 | mlog_errno(status); |
2357 | goto bail; | 2363 | goto bail; |
2358 | } | 2364 | } |
2359 | 2365 | getbh: | |
2360 | if (ret_bh) { | 2366 | if (ret_bh) { |
2361 | status = ocfs2_assign_bh(inode, ret_bh, local_bh); | 2367 | status = ocfs2_assign_bh(inode, ret_bh, local_bh); |
2362 | if (status < 0) { | 2368 | if (status < 0) { |
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex) | |||
2628 | 2634 | ||
2629 | BUG_ON(!dl); | 2635 | BUG_ON(!dl); |
2630 | 2636 | ||
2631 | if (ocfs2_is_hard_readonly(osb)) | 2637 | if (ocfs2_is_hard_readonly(osb)) { |
2632 | return -EROFS; | 2638 | if (ex) |
2639 | return -EROFS; | ||
2640 | return 0; | ||
2641 | } | ||
2633 | 2642 | ||
2634 | if (ocfs2_mount_local(osb)) | 2643 | if (ocfs2_mount_local(osb)) |
2635 | return 0; | 2644 | return 0; |
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex) | |||
2647 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; | 2656 | struct ocfs2_dentry_lock *dl = dentry->d_fsdata; |
2648 | struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); | 2657 | struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); |
2649 | 2658 | ||
2650 | if (!ocfs2_mount_local(osb)) | 2659 | if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) |
2651 | ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); | 2660 | ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); |
2652 | } | 2661 | } |
2653 | 2662 | ||
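Taken together, the dlmglue hunks make a hard-readonly mount behave like a local mount on the read side: read locks succeed without touching the (dead) cluster, write locks fail with -EROFS, and each unlock path must skip exactly the cases its lock path skipped. A compressed sketch of the pattern, with hypothetical names rather than the ocfs2 API:

	#include <errno.h>
	#include <stdbool.h>

	/* lock side: no cluster services on hard-readonly or local mounts */
	static int open_lock(bool hard_ro, bool local, bool write)
	{
		if (hard_ro)
			return write ? -EROFS : 0; /* reads need no lock */
		if (local)
			return 0;
		/* ... take the real cluster lock here ... */
		return 0;
	}

	/* unlock must mirror the guards, or we drop a lock never taken */
	static void open_unlock(bool hard_ro, bool local)
	{
		if (hard_ro || local)
			return;
		/* ... drop the real cluster lock here ... */
	}

	int main(void)
	{
		int ret = open_lock(true, false, true);	/* hard-ro write */
		open_unlock(true, false);		/* no-op by design */
		return ret == -EROFS ? 0 : 1;
	}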
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 23457b491e8c..2f5b92ef0e53 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -832,6 +832,102 @@ out: | |||
832 | return ret; | 832 | return ret; |
833 | } | 833 | } |
834 | 834 | ||
835 | int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin) | ||
836 | { | ||
837 | struct inode *inode = file->f_mapping->host; | ||
838 | int ret; | ||
839 | unsigned int is_last = 0, is_data = 0; | ||
840 | u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; | ||
841 | u32 cpos, cend, clen, hole_size; | ||
842 | u64 extoff, extlen; | ||
843 | struct buffer_head *di_bh = NULL; | ||
844 | struct ocfs2_extent_rec rec; | ||
845 | |||
846 | BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE); | ||
847 | |||
848 | ret = ocfs2_inode_lock(inode, &di_bh, 0); | ||
849 | if (ret) { | ||
850 | mlog_errno(ret); | ||
851 | goto out; | ||
852 | } | ||
853 | |||
854 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
855 | |||
856 | if (*offset >= inode->i_size) { | ||
857 | ret = -ENXIO; | ||
858 | goto out_unlock; | ||
859 | } | ||
860 | |||
861 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { | ||
862 | if (origin == SEEK_HOLE) | ||
863 | *offset = inode->i_size; | ||
864 | goto out_unlock; | ||
865 | } | ||
866 | |||
867 | clen = 0; | ||
868 | cpos = *offset >> cs_bits; | ||
869 | cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size); | ||
870 | |||
871 | while (cpos < cend && !is_last) { | ||
872 | ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, | ||
873 | &rec, &is_last); | ||
874 | if (ret) { | ||
875 | mlog_errno(ret); | ||
876 | goto out_unlock; | ||
877 | } | ||
878 | |||
879 | extoff = cpos; | ||
880 | extoff <<= cs_bits; | ||
881 | |||
882 | if (rec.e_blkno == 0ULL) { | ||
883 | clen = hole_size; | ||
884 | is_data = 0; | ||
885 | } else { | ||
886 | clen = le16_to_cpu(rec.e_leaf_clusters) - | ||
887 | (cpos - le32_to_cpu(rec.e_cpos)); | ||
888 | is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1; | ||
889 | } | ||
890 | |||
891 | if ((!is_data && origin == SEEK_HOLE) || | ||
892 | (is_data && origin == SEEK_DATA)) { | ||
893 | if (extoff > *offset) | ||
894 | *offset = extoff; | ||
895 | goto out_unlock; | ||
896 | } | ||
897 | |||
898 | if (!is_last) | ||
899 | cpos += clen; | ||
900 | } | ||
901 | |||
902 | if (origin == SEEK_HOLE) { | ||
903 | extoff = cpos; | ||
904 | extoff <<= cs_bits; | ||
905 | extlen = clen; | ||
906 | extlen <<= cs_bits; | ||
907 | |||
908 | if ((extoff + extlen) > inode->i_size) | ||
909 | extlen = inode->i_size - extoff; | ||
910 | extoff += extlen; | ||
911 | if (extoff > *offset) | ||
912 | *offset = extoff; | ||
913 | goto out_unlock; | ||
914 | } | ||
915 | |||
916 | ret = -ENXIO; | ||
917 | |||
918 | out_unlock: | ||
919 | |||
920 | brelse(di_bh); | ||
921 | |||
922 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
923 | |||
924 | ocfs2_inode_unlock(inode, 0); | ||
925 | out: | ||
926 | if (ret && ret != -ENXIO) | ||
927 | ret = -ENXIO; | ||
928 | return ret; | ||
929 | } | ||
930 | |||
835 | int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, | 931 | int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, |
836 | struct buffer_head *bhs[], int flags, | 932 | struct buffer_head *bhs[], int flags, |
837 | int (*validate)(struct super_block *sb, | 933 | int (*validate)(struct super_block *sb, |
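The new ocfs2_seek_data_hole_offset() is what backs lseek(2)'s SEEK_DATA and SEEK_HOLE on ocfs2: it walks extent records under the inode lock, treats unwritten extents as holes, and yields -ENXIO once the offset is past the last data. A small userspace walker that exercises the interface; it should work on any filesystem that supports the two flags:

	#define _GNU_SOURCE	/* SEEK_DATA / SEEK_HOLE */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		off_t data, hole = 0;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <file>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* SEEK_DATA fails with ENXIO once no data remains */
		while ((data = lseek(fd, hole, SEEK_DATA)) >= 0) {
			hole = lseek(fd, data, SEEK_HOLE);
			if (hole < 0)
				break;
			printf("data: [%lld, %lld)\n",
			       (long long)data, (long long)hole);
		}
		close(fd);
		return 0;
	}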
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index e79d41c2c909..67ea57d2fd59 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h | |||
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, | |||
53 | int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 53 | int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
54 | u64 map_start, u64 map_len); | 54 | u64 map_start, u64 map_len); |
55 | 55 | ||
56 | int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin); | ||
57 | |||
56 | int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, | 58 | int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, |
57 | u32 *p_cluster, u32 *num_clusters, | 59 | u32 *p_cluster, u32 *num_clusters, |
58 | struct ocfs2_extent_list *el, | 60 | struct ocfs2_extent_list *el, |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index de4ea1af041b..6e396683c3d4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, | |||
1950 | if (ret < 0) | 1950 | if (ret < 0) |
1951 | mlog_errno(ret); | 1951 | mlog_errno(ret); |
1952 | 1952 | ||
1953 | if (file->f_flags & O_SYNC) | ||
1954 | handle->h_sync = 1; | ||
1955 | |||
1953 | ocfs2_commit_trans(osb, handle); | 1956 | ocfs2_commit_trans(osb, handle); |
1954 | 1957 | ||
1955 | out_inode_unlock: | 1958 | out_inode_unlock: |
@@ -2052,6 +2055,23 @@ out: | |||
2052 | return ret; | 2055 | return ret; |
2053 | } | 2056 | } |
2054 | 2057 | ||
2058 | static void ocfs2_aiodio_wait(struct inode *inode) | ||
2059 | { | ||
2060 | wait_queue_head_t *wq = ocfs2_ioend_wq(inode); | ||
2061 | |||
2062 | wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0)); | ||
2063 | } | ||
2064 | |||
2065 | static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) | ||
2066 | { | ||
2067 | int blockmask = inode->i_sb->s_blocksize - 1; | ||
2068 | loff_t final_size = pos + count; | ||
2069 | |||
2070 | if ((pos & blockmask) || (final_size & blockmask)) | ||
2071 | return 1; | ||
2072 | return 0; | ||
2073 | } | ||
2074 | |||
2055 | static int ocfs2_prepare_inode_for_refcount(struct inode *inode, | 2075 | static int ocfs2_prepare_inode_for_refcount(struct inode *inode, |
2056 | struct file *file, | 2076 | struct file *file, |
2057 | loff_t pos, size_t count, | 2077 | loff_t pos, size_t count, |
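ocfs2_is_io_unaligned() is a pure predicate: a write counts as unaligned when either its start or its end falls inside a filesystem block, which is exactly where concurrent direct I/O could race over zeroing of a partial block. The same test, lifted into a runnable userspace check:

	#include <assert.h>
	#include <stddef.h>

	/* blocksize must be a power of two, as filesystem block sizes are */
	static int io_unaligned(long long pos, size_t count, unsigned blocksize)
	{
		unsigned blockmask = blocksize - 1;
		long long final_size = pos + (long long)count;

		return (pos & blockmask) || (final_size & blockmask);
	}

	int main(void)
	{
		assert(!io_unaligned(0, 4096, 4096));	/* one whole block */
		assert(io_unaligned(512, 4096, 4096));	/* misaligned start */
		assert(io_unaligned(4096, 100, 4096));	/* partial last block */
		return 0;
	}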
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
2230 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 2250 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
2231 | int full_coherency = !(osb->s_mount_opt & | 2251 | int full_coherency = !(osb->s_mount_opt & |
2232 | OCFS2_MOUNT_COHERENCY_BUFFERED); | 2252 | OCFS2_MOUNT_COHERENCY_BUFFERED); |
2253 | int unaligned_dio = 0; | ||
2233 | 2254 | ||
2234 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, | 2255 | trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, |
2235 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | 2256 | (unsigned long long)OCFS2_I(inode)->ip_blkno, |
@@ -2297,6 +2318,10 @@ relock: | |||
2297 | goto out; | 2318 | goto out; |
2298 | } | 2319 | } |
2299 | 2320 | ||
2321 | if (direct_io && !is_sync_kiocb(iocb)) | ||
2322 | unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left, | ||
2323 | *ppos); | ||
2324 | |||
2300 | /* | 2325 | /* |
2301 | * We can't complete the direct I/O as requested, fall back to | 2326 | * We can't complete the direct I/O as requested, fall back to |
2302 | * buffered I/O. | 2327 | * buffered I/O. |
@@ -2311,6 +2336,18 @@ relock: | |||
2311 | goto relock; | 2336 | goto relock; |
2312 | } | 2337 | } |
2313 | 2338 | ||
2339 | if (unaligned_dio) { | ||
2340 | /* | ||
2341 | * Wait on previous unaligned aio to complete before | ||
2342 | * proceeding. | ||
2343 | */ | ||
2344 | ocfs2_aiodio_wait(inode); | ||
2345 | |||
2346 | /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */ | ||
2347 | atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio); | ||
2348 | ocfs2_iocb_set_unaligned_aio(iocb); | ||
2349 | } | ||
2350 | |||
2314 | /* | 2351 | /* |
2315 | * To later detect whether a journal commit for sync writes is | 2352 | * To later detect whether a journal commit for sync writes is |
2316 | * necessary, we sample i_size, and cluster count here. | 2353 | * necessary, we sample i_size, and cluster count here. |
@@ -2382,8 +2419,12 @@ out_dio: | |||
2382 | if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { | 2419 | if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { |
2383 | rw_level = -1; | 2420 | rw_level = -1; |
2384 | have_alloc_sem = 0; | 2421 | have_alloc_sem = 0; |
2422 | unaligned_dio = 0; | ||
2385 | } | 2423 | } |
2386 | 2424 | ||
2425 | if (unaligned_dio) | ||
2426 | atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio); | ||
2427 | |||
2387 | out: | 2428 | out: |
2388 | if (rw_level != -1) | 2429 | if (rw_level != -1) |
2389 | ocfs2_rw_unlock(inode, rw_level); | 2430 | ocfs2_rw_unlock(inode, rw_level); |
@@ -2591,6 +2632,57 @@ bail: | |||
2591 | return ret; | 2632 | return ret; |
2592 | } | 2633 | } |
2593 | 2634 | ||
2635 | /* Refer to generic_file_llseek_unlocked() */ ||
2636 | static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin) | ||
2637 | { | ||
2638 | struct inode *inode = file->f_mapping->host; | ||
2639 | int ret = 0; | ||
2640 | |||
2641 | mutex_lock(&inode->i_mutex); | ||
2642 | |||
2643 | switch (origin) { | ||
2644 | case SEEK_SET: | ||
2645 | break; | ||
2646 | case SEEK_END: | ||
2647 | offset += inode->i_size; | ||
2648 | break; | ||
2649 | case SEEK_CUR: | ||
2650 | if (offset == 0) { | ||
2651 | offset = file->f_pos; | ||
2652 | goto out; | ||
2653 | } | ||
2654 | offset += file->f_pos; | ||
2655 | break; | ||
2656 | case SEEK_DATA: | ||
2657 | case SEEK_HOLE: | ||
2658 | ret = ocfs2_seek_data_hole_offset(file, &offset, origin); | ||
2659 | if (ret) | ||
2660 | goto out; | ||
2661 | break; | ||
2662 | default: | ||
2663 | ret = -EINVAL; | ||
2664 | goto out; | ||
2665 | } | ||
2666 | |||
2667 | if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) | ||
2668 | ret = -EINVAL; | ||
2669 | if (!ret && offset > inode->i_sb->s_maxbytes) | ||
2670 | ret = -EINVAL; | ||
2671 | if (ret) | ||
2672 | goto out; | ||
2673 | |||
2674 | if (offset != file->f_pos) { | ||
2675 | file->f_pos = offset; | ||
2676 | file->f_version = 0; | ||
2677 | } | ||
2678 | |||
2679 | out: | ||
2680 | mutex_unlock(&inode->i_mutex); | ||
2681 | if (ret) | ||
2682 | return ret; | ||
2683 | return offset; | ||
2684 | } | ||
2685 | |||
2594 | const struct inode_operations ocfs2_file_iops = { | 2686 | const struct inode_operations ocfs2_file_iops = { |
2595 | .setattr = ocfs2_setattr, | 2687 | .setattr = ocfs2_setattr, |
2596 | .getattr = ocfs2_getattr, | 2688 | .getattr = ocfs2_getattr, |
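The new ocfs2_file_llseek() mirrors generic_file_llseek_unlocked() but runs under i_mutex so the SEEK_DATA/SEEK_HOLE branch can take cluster locks safely. The whence arithmetic for the non-extent cases, sketched standalone, including the SEEK_CUR(0) 'tell' fast path that skips validation:

	#include <errno.h>
	#include <unistd.h>	/* SEEK_SET / SEEK_CUR / SEEK_END */

	/* a negative return is an errno, following the kernel convention */
	static long long new_offset(long long offset, int whence,
				    long long pos, long long size,
				    long long maxbytes)
	{
		switch (whence) {
		case SEEK_SET:
			break;
		case SEEK_END:
			offset += size;
			break;
		case SEEK_CUR:
			if (offset == 0)
				return pos;	/* pure tell: no checks */
			offset += pos;
			break;
		default:
			return -EINVAL;
		}
		if (offset < 0 || offset > maxbytes)
			return -EINVAL;
		return offset;
	}

	int main(void)
	{
		/* size 1000: SEEK_END with -1 lands on the last byte */
		return new_offset(-1, SEEK_END, 0, 1000, 1LL << 40) == 999 ? 0 : 1;
	}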
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = { | |||
2615 | * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! | 2707 | * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! |
2616 | */ | 2708 | */ |
2617 | const struct file_operations ocfs2_fops = { | 2709 | const struct file_operations ocfs2_fops = { |
2618 | .llseek = generic_file_llseek, | 2710 | .llseek = ocfs2_file_llseek, |
2619 | .read = do_sync_read, | 2711 | .read = do_sync_read, |
2620 | .write = do_sync_write, | 2712 | .write = do_sync_write, |
2621 | .mmap = ocfs2_mmap, | 2713 | .mmap = ocfs2_mmap, |
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = { | |||
2663 | * the cluster. | 2755 | * the cluster. |
2664 | */ | 2756 | */ |
2665 | const struct file_operations ocfs2_fops_no_plocks = { | 2757 | const struct file_operations ocfs2_fops_no_plocks = { |
2666 | .llseek = generic_file_llseek, | 2758 | .llseek = ocfs2_file_llseek, |
2667 | .read = do_sync_read, | 2759 | .read = do_sync_read, |
2668 | .write = do_sync_write, | 2760 | .write = do_sync_write, |
2669 | .mmap = ocfs2_mmap, | 2761 | .mmap = ocfs2_mmap, |
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index a22d2c098890..17454a904d7b 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c | |||
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode, | |||
951 | trace_ocfs2_cleanup_delete_inode( | 951 | trace_ocfs2_cleanup_delete_inode( |
952 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); | 952 | (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); |
953 | if (sync_data) | 953 | if (sync_data) |
954 | write_inode_now(inode, 1); | 954 | filemap_write_and_wait(inode->i_mapping); |
955 | truncate_inode_pages(&inode->i_data, 0); | 955 | truncate_inode_pages(&inode->i_data, 0); |
956 | } | 956 | } |
957 | 957 | ||
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index 1c508b149b3a..88924a3133fa 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h | |||
@@ -43,6 +43,9 @@ struct ocfs2_inode_info | |||
43 | /* protects extended attribute changes on this inode */ | 43 | /* protects extended attribute changes on this inode */ |
44 | struct rw_semaphore ip_xattr_sem; | 44 | struct rw_semaphore ip_xattr_sem; |
45 | 45 | ||
46 | /* Number of outstanding AIOs that are not page aligned */ ||
47 | atomic_t ip_unaligned_aio; | ||
48 | |||
46 | /* These fields are protected by ip_lock */ | 49 | /* These fields are protected by ip_lock */ |
47 | spinlock_t ip_lock; | 50 | spinlock_t ip_lock; |
48 | u32 ip_open_count; | 51 | u32 ip_open_count; |
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index bc91072b7219..726ff265b296 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, | |||
122 | if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & | 122 | if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & |
123 | (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { | 123 | (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { |
124 | if (!capable(CAP_LINUX_IMMUTABLE)) | 124 | if (!capable(CAP_LINUX_IMMUTABLE)) |
125 | goto bail_unlock; | 125 | goto bail_commit; |
126 | } | 126 | } |
127 | 127 | ||
128 | ocfs2_inode->ip_attr = flags; | 128 | ocfs2_inode->ip_attr = flags; |
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, | |||
132 | if (status < 0) | 132 | if (status < 0) |
133 | mlog_errno(status); | 133 | mlog_errno(status); |
134 | 134 | ||
135 | bail_commit: | ||
135 | ocfs2_commit_trans(osb, handle); | 136 | ocfs2_commit_trans(osb, handle); |
136 | bail_unlock: | 137 | bail_unlock: |
137 | ocfs2_inode_unlock(inode, 1); | 138 | ocfs2_inode_unlock(inode, 1); |
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode, | |||
381 | if (!oifi) { | 382 | if (!oifi) { |
382 | status = -ENOMEM; | 383 | status = -ENOMEM; |
383 | mlog_errno(status); | 384 | mlog_errno(status); |
384 | goto bail; | 385 | goto out_err; |
385 | } | 386 | } |
386 | 387 | ||
387 | if (o2info_from_user(*oifi, req)) | 388 | if (o2info_from_user(*oifi, req)) |
@@ -431,7 +432,7 @@ bail: | |||
431 | o2info_set_request_error(&oifi->ifi_req, req); | 432 | o2info_set_request_error(&oifi->ifi_req, req); |
432 | 433 | ||
433 | kfree(oifi); | 434 | kfree(oifi); |
434 | 435 | out_err: | |
435 | return status; | 436 | return status; |
436 | } | 437 | } |
437 | 438 | ||
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode, | |||
666 | if (!oiff) { | 667 | if (!oiff) { |
667 | status = -ENOMEM; | 668 | status = -ENOMEM; |
668 | mlog_errno(status); | 669 | mlog_errno(status); |
669 | goto bail; | 670 | goto out_err; |
670 | } | 671 | } |
671 | 672 | ||
672 | if (o2info_from_user(*oiff, req)) | 673 | if (o2info_from_user(*oiff, req)) |
@@ -716,7 +717,7 @@ bail: | |||
716 | o2info_set_request_error(&oiff->iff_req, req); | 717 | o2info_set_request_error(&oiff->iff_req, req); |
717 | 718 | ||
718 | kfree(oiff); | 719 | kfree(oiff); |
719 | 720 | out_err: | |
720 | return status; | 721 | return status; |
721 | } | 722 | } |
722 | 723 | ||
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 295d56454e8b..0a42ae96dca7 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1544 | /* we need to run complete recovery for offline orphan slots */ | 1544 | /* we need to run complete recovery for offline orphan slots */ |
1545 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); | 1545 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); |
1546 | 1546 | ||
1547 | mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", | 1547 | printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\ |
1548 | node_num, slot_num, | 1548 | "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), |
1549 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); | 1549 | MINOR(osb->sb->s_dev)); |
1550 | 1550 | ||
1551 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); | 1551 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); |
1552 | 1552 | ||
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, | |||
1601 | 1601 | ||
1602 | jbd2_journal_destroy(journal); | 1602 | jbd2_journal_destroy(journal); |
1603 | 1603 | ||
1604 | printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\ | ||
1605 | "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), | ||
1606 | MINOR(osb->sb->s_dev)); | ||
1604 | done: | 1607 | done: |
1605 | /* drop the lock on this nodes journal */ | 1608 | /* drop the lock on this nodes journal */ |
1606 | if (got_lock) | 1609 | if (got_lock) |
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void) | |||
1808 | * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This | 1811 | * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This |
1809 | * is done to catch any orphans that are left over in orphan directories. | 1812 | * is done to catch any orphans that are left over in orphan directories. |
1810 | * | 1813 | * |
1814 | * It scans all slots, even ones that are in use. It does so to handle the | ||
1815 | * case described below: | ||
1816 | * | ||
1817 | * Node 1 has an inode it was using. The dentry went away due to memory | ||
1818 | * pressure. Node 1 closes the inode, but it's on the free list. The node | ||
1819 | * has the open lock. | ||
1820 | * Node 2 unlinks the inode. It grabs the dentry lock to notify others, | ||
1821 | * but node 1 has no dentry and doesn't get the message. It trylocks the | ||
1822 | * open lock, sees that another node has a PR, and does nothing. | ||
1823 | * Later node 2 runs its orphan dir. It igets the inode, trylocks the | ||
1824 | * open lock, sees the PR still, and does nothing. | ||
1825 | * Basically, we have to trigger an orphan iput on node 1. The only way | ||
1826 | * for this to happen is if node 1 runs node 2's orphan dir. | ||
1827 | * | ||
1811 | * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT | 1828 | * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT |
1812 | * seconds. It gets an EX lock on os_lockres and checks sequence number | 1829 | * seconds. It gets an EX lock on os_lockres and checks sequence number |
1813 | * stored in LVB. If the sequence number has changed, it means some other | 1830 | * stored in LVB. If the sequence number has changed, it means some other |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 68cf2f6d3c6a..a3385b63ff5e 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir, | |||
441 | #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) | 441 | #define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) |
442 | 442 | ||
443 | /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota | 443 | /* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota |
444 | * update on dir + index leaf + dx root update for free list */ | 444 | * update on dir + index leaf + dx root update for free list + |
445 | * previous dirblock update in the free list */ | ||
445 | static inline int ocfs2_link_credits(struct super_block *sb) | 446 | static inline int ocfs2_link_credits(struct super_block *sb) |
446 | { | 447 | { |
447 | return 2*OCFS2_INODE_UPDATE_CREDITS + 3 + | 448 | return 2*OCFS2_INODE_UPDATE_CREDITS + 4 + |
448 | ocfs2_quota_trans_credits(sb); | 449 | ocfs2_quota_trans_credits(sb); |
449 | } | 450 | } |
450 | 451 | ||
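The credit bump from 3 to 4 pays for one more buffer: when a dir entry is reused from the free list, the previous dirblock in that list may also need its pointer rewritten inside the same transaction. Counting it out, assuming OCFS2_INODE_UPDATE_CREDITS is 1 as in this era's journal.h:

	/* illustrative only; the constant's value is an assumption */
	#define OCFS2_INODE_UPDATE_CREDITS 1

	/* file inode + dir inode (2), dir entry block, index leaf,
	 * dx root, previous dirblock in the free list (4): total 6,
	 * before the quota credits are added on top */
	static int link_credits_without_quota(void)
	{
		return 2 * OCFS2_INODE_UPDATE_CREDITS + 4;
	}

	int main(void)
	{
		return link_credits_without_quota() == 6 ? 0 : 1;
	}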
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 3e9393ca39eb..9cd41083e991 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c | |||
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) | |||
61 | static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | 61 | static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, |
62 | struct page *page) | 62 | struct page *page) |
63 | { | 63 | { |
64 | int ret; | 64 | int ret = VM_FAULT_NOPAGE; |
65 | struct inode *inode = file->f_path.dentry->d_inode; | 65 | struct inode *inode = file->f_path.dentry->d_inode; |
66 | struct address_space *mapping = inode->i_mapping; | 66 | struct address_space *mapping = inode->i_mapping; |
67 | loff_t pos = page_offset(page); | 67 | loff_t pos = page_offset(page); |
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | |||
71 | void *fsdata; | 71 | void *fsdata; |
72 | loff_t size = i_size_read(inode); | 72 | loff_t size = i_size_read(inode); |
73 | 73 | ||
74 | /* | ||
75 | * Another node might have truncated while we were waiting on | ||
76 | * cluster locks. | ||
77 | * We don't check size == 0 before the shift. This is borrowed | ||
78 | * from do_generic_file_read. | ||
79 | */ | ||
80 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; | 74 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; |
81 | if (unlikely(!size || page->index > last_index)) { | ||
82 | ret = -EINVAL; | ||
83 | goto out; | ||
84 | } | ||
85 | 75 | ||
86 | /* | 76 | /* |
87 | * The i_size check above doesn't catch the case where nodes | 77 | * There are cases that lead to the page no longer belonging to the |
88 | * truncated and then re-extended the file. We'll re-check the | 78 | * mapping. |
89 | * page mapping after taking the page lock inside of | 79 | * 1) pagecache truncates locally due to memory pressure. |
90 | * ocfs2_write_begin_nolock(). | 80 | * 2) pagecache truncates when another node takes an EX lock against |
81 | * the inode lock; see ocfs2_data_convert_worker. |
82 | * | ||
83 | * The i_size check doesn't catch the case where nodes truncated and | ||
84 | * then re-extended the file. We'll re-check the page mapping after | ||
85 | * taking the page lock inside of ocfs2_write_begin_nolock(). | ||
86 | * | ||
87 | * Let VM retry with these cases. | ||
91 | */ | 88 | */ |
92 | if (!PageUptodate(page) || page->mapping != inode->i_mapping) { | 89 | if ((page->mapping != inode->i_mapping) || |
93 | /* | 90 | (!PageUptodate(page)) || |
94 | * the page has been umapped in ocfs2_data_downconvert_worker. | 91 | (page_offset(page) >= size)) |
95 | * So return 0 here and let VFS retry. | ||
96 | */ | ||
97 | ret = 0; | ||
98 | goto out; | 92 | goto out; |
99 | } | ||
100 | 93 | ||
101 | /* | 94 | /* |
102 | * Call ocfs2_write_begin() and ocfs2_write_end() to take | 95 | * Call ocfs2_write_begin() and ocfs2_write_end() to take |
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, | |||
116 | if (ret) { | 109 | if (ret) { |
117 | if (ret != -ENOSPC) | 110 | if (ret != -ENOSPC) |
118 | mlog_errno(ret); | 111 | mlog_errno(ret); |
112 | if (ret == -ENOMEM) | ||
113 | ret = VM_FAULT_OOM; | ||
114 | else | ||
115 | ret = VM_FAULT_SIGBUS; | ||
119 | goto out; | 116 | goto out; |
120 | } | 117 | } |
121 | 118 | ||
122 | ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, | 119 | if (!locked_page) { |
123 | fsdata); | 120 | ret = VM_FAULT_NOPAGE; |
124 | if (ret < 0) { | ||
125 | mlog_errno(ret); | ||
126 | goto out; | 121 | goto out; |
127 | } | 122 | } |
123 | ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, | ||
124 | fsdata); | ||
128 | BUG_ON(ret != len); | 125 | BUG_ON(ret != len); |
129 | ret = 0; | 126 | ret = VM_FAULT_LOCKED; |
130 | out: | 127 | out: |
131 | return ret; | 128 | return ret; |
132 | } | 129 | } |
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
168 | 165 | ||
169 | out: | 166 | out: |
170 | ocfs2_unblock_signals(&oldset); | 167 | ocfs2_unblock_signals(&oldset); |
171 | if (ret) | ||
172 | ret = VM_FAULT_SIGBUS; | ||
173 | return ret; | 168 | return ret; |
174 | } | 169 | } |
175 | 170 | ||
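These mmap.c hunks move fault-code selection into __ocfs2_page_mkwrite() itself: truncation races now return VM_FAULT_NOPAGE so the VM simply retries, -ENOMEM maps to VM_FAULT_OOM, other errors to VM_FAULT_SIGBUS, and success returns VM_FAULT_LOCKED with the page still held. The translation step in isolation; the VM_FAULT_* values below mirror the mm.h of this era but are stand-ins here:

	#include <errno.h>
	#include <stdio.h>

	#define VM_FAULT_OOM	0x0001
	#define VM_FAULT_SIGBUS	0x0002
	#define VM_FAULT_NOPAGE	0x0100	/* retry: page went away under us */
	#define VM_FAULT_LOCKED	0x0200	/* success: page returned locked */

	static int fault_code(int err)
	{
		return err == -ENOMEM ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
	}

	int main(void)
	{
		printf("-ENOMEM -> %#x, -EIO -> %#x\n",
		       fault_code(-ENOMEM), fault_code(-EIO));
		return 0;
	}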
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index d53cb706f14c..184c76b8c293 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, | |||
745 | */ | 745 | */ |
746 | ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, | 746 | ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, |
747 | new_phys_cpos); | 747 | new_phys_cpos); |
748 | if (!new_phys_cpos) { | 748 | if (!*new_phys_cpos) { |
749 | ret = -ENOSPC; | 749 | ret = -ENOSPC; |
750 | goto out_commit; | 750 | goto out_commit; |
751 | } | 751 | } |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 409285854f64..d355e6e36b36 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb, | |||
836 | 836 | ||
837 | static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) | 837 | static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap) |
838 | { | 838 | { |
839 | __test_and_set_bit_le(bit, bitmap); | 839 | __set_bit_le(bit, bitmap); |
840 | } | 840 | } |
841 | #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) | 841 | #define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr)) |
842 | 842 | ||
843 | static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) | 843 | static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap) |
844 | { | 844 | { |
845 | __test_and_clear_bit_le(bit, bitmap); | 845 | __clear_bit_le(bit, bitmap); |
846 | } | 846 | } |
847 | #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) | 847 | #define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr)) |
848 | 848 | ||
849 | #define ocfs2_test_bit test_bit_le | 849 | #define ocfs2_test_bit test_bit_le |
850 | #define ocfs2_find_next_zero_bit find_next_zero_bit_le | 850 | #define ocfs2_find_next_zero_bit find_next_zero_bit_le |
851 | #define ocfs2_find_next_bit find_next_bit_le | 851 | #define ocfs2_find_next_bit find_next_bit_le |
852 | |||
853 | static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr) | ||
854 | { | ||
855 | #if BITS_PER_LONG == 64 | ||
856 | *bit += ((unsigned long) addr & 7UL) << 3; | ||
857 | addr = (void *) ((unsigned long) addr & ~7UL); | ||
858 | #elif BITS_PER_LONG == 32 | ||
859 | *bit += ((unsigned long) addr & 3UL) << 3; | ||
860 | addr = (void *) ((unsigned long) addr & ~3UL); | ||
861 | #else | ||
862 | #error "how many bits you are?!" | ||
863 | #endif | ||
864 | return addr; | ||
865 | } | ||
866 | |||
867 | static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap) | ||
868 | { | ||
869 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
870 | ocfs2_set_bit(bit, bitmap); | ||
871 | } | ||
872 | |||
873 | static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap) | ||
874 | { | ||
875 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
876 | ocfs2_clear_bit(bit, bitmap); | ||
877 | } | ||
878 | |||
879 | static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap) | ||
880 | { | ||
881 | bitmap = correct_addr_and_bit_unaligned(&bit, bitmap); | ||
882 | return ocfs2_test_bit(bit, bitmap); | ||
883 | } | ||
884 | |||
885 | static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max, | ||
886 | int start) | ||
887 | { | ||
888 | int fix = 0, ret, tmpmax; | ||
889 | bitmap = correct_addr_and_bit_unaligned(&fix, bitmap); | ||
890 | tmpmax = max + fix; | ||
891 | start += fix; | ||
892 | |||
893 | ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix; | ||
894 | if (ret > max) | ||
895 | return max; | ||
896 | return ret; | ||
897 | } | ||
898 | |||
852 | #endif /* OCFS2_H */ | 899 | #endif /* OCFS2_H */ |
853 | 900 | ||
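The *_unaligned bitmap helpers exist because dqc_bitmap lives at an arbitrary byte offset inside the on-disk quota chunk header, while the generic little-endian bitops want a long-aligned base. The fix is pure pointer arithmetic: round the address down to a long boundary and fold the skipped bytes, times eight, into the bit index. A self-contained demonstration of that arithmetic, with byte-wise helpers standing in for the kernel's le bitops:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	static void *align_down(int *bit, void *addr)
	{
		uintptr_t p = (uintptr_t)addr;

		*bit += (int)(p & (sizeof(long) - 1)) << 3; /* bytes -> bits */
		return (void *)(p & ~((uintptr_t)sizeof(long) - 1));
	}

	static void set_bit_unaligned(int bit, void *bitmap)
	{
		unsigned char *p = align_down(&bit, bitmap);

		p[bit >> 3] |= 1u << (bit & 7);
	}

	static int test_bit_unaligned(int bit, void *bitmap)
	{
		unsigned char *p = align_down(&bit, bitmap);

		return (p[bit >> 3] >> (bit & 7)) & 1;
	}

	int main(void)
	{
		unsigned char buf[32];

		memset(buf, 0, sizeof(buf));
		set_bit_unaligned(5, buf + 3);	/* deliberately misaligned base */
		assert(test_bit_unaligned(5, buf + 3));
		assert(buf[3] == (1u << 5));	/* the bit landed where expected */
		return 0;
	}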
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index dc8007fc9247..f100bf70a906 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( | |||
404 | int status = 0; | 404 | int status = 0; |
405 | struct ocfs2_quota_recovery *rec; | 405 | struct ocfs2_quota_recovery *rec; |
406 | 406 | ||
407 | mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); | 407 | printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for " |
408 | "slot %u\n", osb->dev_str, slot_num); | ||
409 | |||
408 | rec = ocfs2_alloc_quota_recovery(); | 410 | rec = ocfs2_alloc_quota_recovery(); |
409 | if (!rec) | 411 | if (!rec) |
410 | return ERR_PTR(-ENOMEM); | 412 | return ERR_PTR(-ENOMEM); |
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, | |||
549 | goto out_commit; | 551 | goto out_commit; |
550 | } | 552 | } |
551 | lock_buffer(qbh); | 553 | lock_buffer(qbh); |
552 | WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap)); | 554 | WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap)); |
553 | ocfs2_clear_bit(bit, dchunk->dqc_bitmap); | 555 | ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap); |
554 | le32_add_cpu(&dchunk->dqc_free, 1); | 556 | le32_add_cpu(&dchunk->dqc_free, 1); |
555 | unlock_buffer(qbh); | 557 | unlock_buffer(qbh); |
556 | ocfs2_journal_dirty(handle, qbh); | 558 | ocfs2_journal_dirty(handle, qbh); |
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
596 | struct inode *lqinode; | 598 | struct inode *lqinode; |
597 | unsigned int flags; | 599 | unsigned int flags; |
598 | 600 | ||
599 | mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); | 601 | printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " |
602 | "slot %u\n", osb->dev_str, slot_num); | ||
603 | |||
600 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | 604 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
601 | for (type = 0; type < MAXQUOTAS; type++) { | 605 | for (type = 0; type < MAXQUOTAS; type++) { |
602 | if (list_empty(&(rec->r_list[type]))) | 606 | if (list_empty(&(rec->r_list[type]))) |
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
612 | /* Someone else is holding the lock? Then he must be | 616 | /* Someone else is holding the lock? Then he must be |
613 | * doing the recovery. Just skip the file... */ | 617 | * doing the recovery. Just skip the file... */ |
614 | if (status == -EAGAIN) { | 618 | if (status == -EAGAIN) { |
615 | mlog(ML_NOTICE, "skipping quota recovery for slot %d " | 619 | printk(KERN_NOTICE "ocfs2: Skipping quota recovery on " |
616 | "because quota file is locked.\n", slot_num); | 620 | "device (%s) for slot %d because quota file is " |
621 | "locked.\n", osb->dev_str, slot_num); | ||
617 | status = 0; | 622 | status = 0; |
618 | goto out_put; | 623 | goto out_put; |
619 | } else if (status < 0) { | 624 | } else if (status < 0) { |
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb, | |||
944 | * ol_quota_entries_per_block(sb); | 949 | * ol_quota_entries_per_block(sb); |
945 | } | 950 | } |
946 | 951 | ||
947 | found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0); | 952 | found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0); |
948 | /* We failed? */ | 953 | /* We failed? */ |
949 | if (found == len) { | 954 | if (found == len) { |
950 | mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" | 955 | mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u" |
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private) | |||
1208 | struct ocfs2_local_disk_chunk *dchunk; | 1213 | struct ocfs2_local_disk_chunk *dchunk; |
1209 | 1214 | ||
1210 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; | 1215 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; |
1211 | ocfs2_set_bit(*offset, dchunk->dqc_bitmap); | 1216 | ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap); |
1212 | le32_add_cpu(&dchunk->dqc_free, -1); | 1217 | le32_add_cpu(&dchunk->dqc_free, -1); |
1213 | } | 1218 | } |
1214 | 1219 | ||
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) | |||
1289 | (od->dq_chunk->qc_headerbh->b_data); | 1294 | (od->dq_chunk->qc_headerbh->b_data); |
1290 | /* Mark structure as freed */ | 1295 | /* Mark structure as freed */ |
1291 | lock_buffer(od->dq_chunk->qc_headerbh); | 1296 | lock_buffer(od->dq_chunk->qc_headerbh); |
1292 | ocfs2_clear_bit(offset, dchunk->dqc_bitmap); | 1297 | ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap); |
1293 | le32_add_cpu(&dchunk->dqc_free, 1); | 1298 | le32_add_cpu(&dchunk->dqc_free, 1); |
1294 | unlock_buffer(od->dq_chunk->qc_headerbh); | 1299 | unlock_buffer(od->dq_chunk->qc_headerbh); |
1295 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); | 1300 | ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); |
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 26fc0014d509..1424c151cccc 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c | |||
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb) | |||
493 | goto bail; | 493 | goto bail; |
494 | } | 494 | } |
495 | } else | 495 | } else |
496 | mlog(ML_NOTICE, "slot %d is already allocated to this node!\n", | 496 | printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " |
497 | slot); | 497 | "allocated to this node!\n", slot, osb->dev_str); |
498 | 498 | ||
499 | ocfs2_set_slot(si, slot, osb->node_num); | 499 | ocfs2_set_slot(si, slot, osb->node_num); |
500 | osb->slot_num = slot; | 500 | osb->slot_num = slot; |
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 19965b00c43c..94368017edb3 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "cluster/masklog.h" | 28 | #include "cluster/masklog.h" |
29 | #include "cluster/nodemanager.h" | 29 | #include "cluster/nodemanager.h" |
30 | #include "cluster/heartbeat.h" | 30 | #include "cluster/heartbeat.h" |
31 | #include "cluster/tcp.h" | ||
31 | 32 | ||
32 | #include "stackglue.h" | 33 | #include "stackglue.h" |
33 | 34 | ||
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb) | |||
256 | } | 257 | } |
257 | 258 | ||
258 | /* | 259 | /* |
260 | * Check if this node is heartbeating and is connected to all other | ||
261 | * heartbeating nodes. | ||
262 | */ | ||
263 | static int o2cb_cluster_check(void) | ||
264 | { | ||
265 | u8 node_num; | ||
266 | int i; | ||
267 | unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
268 | unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
269 | |||
270 | node_num = o2nm_this_node(); | ||
271 | if (node_num == O2NM_MAX_NODES) { | ||
272 | printk(KERN_ERR "o2cb: This node has not been configured.\n"); | ||
273 | return -EINVAL; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * o2dlm expects o2net sockets to be created. If not, then | ||
278 | * dlm_join_domain() fails with a stack of errors which are both cryptic | ||
279 | * and incomplete. The idea here is to detect upfront whether we have | ||
280 | * managed to connect to all nodes or not. If not, then list the nodes | ||
281 | * to allow the user to check the configuration (incorrect IP, firewall, | ||
282 | * etc.) Yes, this is racy. But it's not the end of the world. ||
283 | */ | ||
284 | #define O2CB_MAP_STABILIZE_COUNT 60 | ||
285 | for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) { | ||
286 | o2hb_fill_node_map(hbmap, sizeof(hbmap)); | ||
287 | if (!test_bit(node_num, hbmap)) { | ||
288 | printk(KERN_ERR "o2cb: %s heartbeat has not been " | ||
289 | "started.\n", (o2hb_global_heartbeat_active() ? | ||
290 | "Global" : "Local")); | ||
291 | return -EINVAL; | ||
292 | } | ||
293 | o2net_fill_node_map(netmap, sizeof(netmap)); | ||
294 | /* Force set the current node to allow easy compare */ | ||
295 | set_bit(node_num, netmap); | ||
296 | if (!memcmp(hbmap, netmap, sizeof(hbmap))) | ||
297 | return 0; | ||
298 | if (i < O2CB_MAP_STABILIZE_COUNT) | ||
299 | msleep(1000); | ||
300 | } | ||
301 | |||
302 | printk(KERN_ERR "o2cb: This node could not connect to nodes:"); | ||
303 | i = -1; | ||
304 | while ((i = find_next_bit(hbmap, O2NM_MAX_NODES, | ||
305 | i + 1)) < O2NM_MAX_NODES) { | ||
306 | if (!test_bit(i, netmap)) | ||
307 | printk(" %u", i); | ||
308 | } | ||
309 | printk(".\n"); | ||
310 | |||
311 | return -ENOTCONN; | ||
312 | } | ||
313 | |||
314 | /* | ||
259 | * Called from the dlm when it's about to evict a node. This is how the | 315 | * Called from the dlm when it's about to evict a node. This is how the |
260 | * classic stack signals node death. | 316 | * classic stack signals node death. |
261 | */ | 317 | */ |
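o2cb_cluster_check() gives dlm_join_domain() a clean precondition: poll for up to a minute until the set of heartbeating nodes equals the set of o2net-connected nodes. Forcing the local node's bit into the net map is what lets a plain memcmp() decide equality. That core comparison, sketched standalone with stand-in sizes and helpers:

	#include <limits.h>
	#include <stdbool.h>
	#include <string.h>

	#define MAX_NODES 255	/* stand-in for O2NM_MAX_NODES */
	#define BITS_TO_LONGS(n) \
		(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

	static void set_bit(int nr, unsigned long *map)
	{
		map[nr / (CHAR_BIT * sizeof(long))] |=
			1UL << (nr % (CHAR_BIT * sizeof(long)));
	}

	static bool maps_agree(unsigned long *hbmap, unsigned long *netmap,
			       int this_node)
	{
		/* we are trivially "connected" to ourselves */
		set_bit(this_node, netmap);
		return !memcmp(hbmap, netmap,
			       BITS_TO_LONGS(MAX_NODES) * sizeof(long));
	}

	int main(void)
	{
		unsigned long hb[BITS_TO_LONGS(MAX_NODES)] = { 0 };
		unsigned long net[BITS_TO_LONGS(MAX_NODES)] = { 0 };

		set_bit(0, hb);		/* we heartbeat */
		set_bit(3, hb);		/* node 3 heartbeats */
		set_bit(3, net);	/* and we reached node 3 */
		return maps_agree(hb, net, 0) ? 0 : 1;	/* exits 0 */
	}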
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data) | |||
263 | { | 319 | { |
264 | struct ocfs2_cluster_connection *conn = data; | 320 | struct ocfs2_cluster_connection *conn = data; |
265 | 321 | ||
266 | mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", | 322 | printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n", |
267 | node_num, conn->cc_namelen, conn->cc_name); | 323 | node_num, conn->cc_namelen, conn->cc_name); |
268 | 324 | ||
269 | conn->cc_recovery_handler(node_num, conn->cc_recovery_data); | 325 | conn->cc_recovery_handler(node_num, conn->cc_recovery_data); |
270 | } | 326 | } |
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) | |||
280 | BUG_ON(conn == NULL); | 336 | BUG_ON(conn == NULL); |
281 | BUG_ON(conn->cc_proto == NULL); | 337 | BUG_ON(conn->cc_proto == NULL); |
282 | 338 | ||
283 | /* for now we only have one cluster/node, make sure we see it | 339 | /* Ensure cluster stack is up and all nodes are connected */ |
284 | * in the heartbeat universe */ | 340 | rc = o2cb_cluster_check(); |
285 | if (!o2hb_check_local_node_heartbeating()) { | 341 | if (rc) { |
286 | if (o2hb_global_heartbeat_active()) | 342 | printk(KERN_ERR "o2cb: Cluster check failed. Fix errors " |
287 | mlog(ML_ERROR, "Global heartbeat not started\n"); | 343 | "before retrying.\n"); |
288 | rc = -EINVAL; | ||
289 | goto out; | 344 | goto out; |
290 | } | 345 | } |
291 | 346 | ||
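The o2cb_cluster_check() hunk above boils down to a subset test on two node bitmaps: every heartbeating node must also appear in the network map, with the local node force-set so a plain memcmp() works. A minimal userspace sketch of that comparison, with an illustrative node count and hand-rolled bit helpers standing in for the kernel's:

    #include <stdio.h>
    #include <string.h>

    #define MAX_NODES 255            /* stands in for O2NM_MAX_NODES */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static int test_bit(int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
    }

    static void set_bit(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    int main(void)
    {
        unsigned long hbmap[BITS_TO_LONGS(MAX_NODES)] = { 0 };
        unsigned long netmap[BITS_TO_LONGS(MAX_NODES)] = { 0 };
        int self = 0, i;

        /* nodes 0, 1, 2 are heartbeating; we only reached node 1 */
        set_bit(0, hbmap); set_bit(1, hbmap); set_bit(2, hbmap);
        set_bit(1, netmap);
        set_bit(self, netmap);   /* force-set self, as the kernel code does */

        if (!memcmp(hbmap, netmap, sizeof(hbmap))) {
            puts("all heartbeating nodes connected");
            return 0;
        }
        printf("not connected to:");
        for (i = 0; i < MAX_NODES; i++)
            if (test_bit(i, hbmap) && !test_bit(i, netmap))
                printf(" %d", i);
        putchar('\n');
        return 1;
    }

Run as-is this reports "not connected to: 2", the same shape of message the patch emits after the stabilize loop gives up.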
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 56f61027236b..4994f8b0e604 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include "ocfs1_fs_compat.h" | 54 | #include "ocfs1_fs_compat.h" |
55 | 55 | ||
56 | #include "alloc.h" | 56 | #include "alloc.h" |
57 | #include "aops.h" | ||
57 | #include "blockcheck.h" | 58 | #include "blockcheck.h" |
58 | #include "dlmglue.h" | 59 | #include "dlmglue.h" |
59 | #include "export.h" | 60 | #include "export.h" |
@@ -1107,9 +1108,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1107 | 1108 | ||
1108 | ocfs2_set_ro_flag(osb, 1); | 1109 | ocfs2_set_ro_flag(osb, 1); |
1109 | 1110 | ||
1110 | printk(KERN_NOTICE "Readonly device detected. No cluster " | 1111 | printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. " |
1111 | "services will be utilized for this mount. Recovery " | 1112 | "Cluster services will not be used for this mount. " |
1112 | "will be skipped.\n"); | 1113 | "Recovery will be skipped.\n", osb->dev_str); |
1113 | } | 1114 | } |
1114 | 1115 | ||
1115 | if (!ocfs2_is_hard_readonly(osb)) { | 1116 | if (!ocfs2_is_hard_readonly(osb)) { |
@@ -1616,12 +1617,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1616 | return 0; | 1617 | return 0; |
1617 | } | 1618 | } |
1618 | 1619 | ||
1620 | wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ]; | ||
1621 | |||
1619 | static int __init ocfs2_init(void) | 1622 | static int __init ocfs2_init(void) |
1620 | { | 1623 | { |
1621 | int status; | 1624 | int status, i; |
1622 | 1625 | ||
1623 | ocfs2_print_version(); | 1626 | ocfs2_print_version(); |
1624 | 1627 | ||
1628 | for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++) | ||
1629 | init_waitqueue_head(&ocfs2__ioend_wq[i]); | ||
1630 | |||
1625 | status = init_ocfs2_uptodate_cache(); | 1631 | status = init_ocfs2_uptodate_cache(); |
1626 | if (status < 0) { | 1632 | if (status < 0) { |
1627 | mlog_errno(status); | 1633 | mlog_errno(status); |
@@ -1760,7 +1766,7 @@ static void ocfs2_inode_init_once(void *data) | |||
1760 | ocfs2_extent_map_init(&oi->vfs_inode); | 1766 | ocfs2_extent_map_init(&oi->vfs_inode); |
1761 | INIT_LIST_HEAD(&oi->ip_io_markers); | 1767 | INIT_LIST_HEAD(&oi->ip_io_markers); |
1762 | oi->ip_dir_start_lookup = 0; | 1768 | oi->ip_dir_start_lookup = 0; |
1763 | 1769 | atomic_set(&oi->ip_unaligned_aio, 0); | |
1764 | init_rwsem(&oi->ip_alloc_sem); | 1770 | init_rwsem(&oi->ip_alloc_sem); |
1765 | init_rwsem(&oi->ip_xattr_sem); | 1771 | init_rwsem(&oi->ip_xattr_sem); |
1766 | mutex_init(&oi->ip_io_mutex); | 1772 | mutex_init(&oi->ip_io_mutex); |
@@ -1974,7 +1980,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) | |||
1974 | * If we failed before we got a uuid_str yet, we can't stop | 1980 | * If we failed before we got a uuid_str yet, we can't stop |
1975 | * heartbeat. Otherwise, do it. | 1981 | * heartbeat. Otherwise, do it. |
1976 | */ | 1982 | */ |
1977 | if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str) | 1983 | if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str && |
1984 | !ocfs2_is_hard_readonly(osb)) | ||
1978 | hangup_needed = 1; | 1985 | hangup_needed = 1; |
1979 | 1986 | ||
1980 | if (osb->cconn) | 1987 | if (osb->cconn) |
@@ -2353,7 +2360,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2353 | mlog_errno(status); | 2360 | mlog_errno(status); |
2354 | goto bail; | 2361 | goto bail; |
2355 | } | 2362 | } |
2356 | cleancache_init_shared_fs((char *)&uuid_net_key, sb); | 2363 | cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb); |
2357 | 2364 | ||
2358 | bail: | 2365 | bail: |
2359 | return status; | 2366 | return status; |
@@ -2462,8 +2469,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb) | |||
2462 | goto finally; | 2469 | goto finally; |
2463 | } | 2470 | } |
2464 | } else { | 2471 | } else { |
2465 | mlog(ML_NOTICE, "File system was not unmounted cleanly, " | 2472 | printk(KERN_NOTICE "ocfs2: File system on device (%s) was not " |
2466 | "recovering volume.\n"); | 2473 | "unmounted cleanly, recovering it.\n", osb->dev_str); |
2467 | } | 2474 | } |
2468 | 2475 | ||
2469 | local = ocfs2_mount_local(osb); | 2476 | local = ocfs2_mount_local(osb); |
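The ocfs2__ioend_wq[] hunk above introduces a small hashed array of wait queue heads so I/O-end waiters do not need a wait queue embedded in every inode; colliding inodes merely share a queue and risk spurious wakeups. A sketch of the idea, assuming a pointer-derived hash (the shift-and-modulo below is purely illustrative, not the hash ocfs2 actually uses):

    #include <stdint.h>
    #include <stdio.h>

    #define IOEND_WQ_HASH_SZ 37   /* assumed size; the real constant lives in ocfs2 headers */

    struct waitqueue { int dummy; };   /* stand-in for wait_queue_head_t */
    static struct waitqueue ioend_wq[IOEND_WQ_HASH_SZ];

    /* Map an inode pointer to one of a small, fixed set of wait queues. */
    static struct waitqueue *ioend_wq_for(const void *inode)
    {
        uintptr_t p = (uintptr_t)inode;
        return &ioend_wq[(p >> 4) % IOEND_WQ_HASH_SZ];  /* drop low alignment bits */
    }

    int main(void)
    {
        int x, y;
        printf("%p %p\n", (void *)ioend_wq_for(&x), (void *)ioend_wq_for(&y));
        return 0;
    }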
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 194fb22ef79d..aa9e8777b09a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode, | |||
2376 | } | 2376 | } |
2377 | 2377 | ||
2378 | ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); | 2378 | ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); |
2379 | if (ret < 0) { | ||
2380 | mlog_errno(ret); | ||
2381 | break; | ||
2382 | } | ||
2383 | 2379 | ||
2384 | ocfs2_commit_trans(osb, ctxt.handle); | 2380 | ocfs2_commit_trans(osb, ctxt.handle); |
2385 | if (ctxt.meta_ac) { | 2381 | if (ctxt.meta_ac) { |
2386 | ocfs2_free_alloc_context(ctxt.meta_ac); | 2382 | ocfs2_free_alloc_context(ctxt.meta_ac); |
2387 | ctxt.meta_ac = NULL; | 2383 | ctxt.meta_ac = NULL; |
2388 | } | 2384 | } |
2385 | |||
2386 | if (ret < 0) { | ||
2387 | mlog_errno(ret); | ||
2388 | break; | ||
2389 | } | ||
2390 | |||
2389 | } | 2391 | } |
2390 | 2392 | ||
2391 | if (ctxt.meta_ac) | 2393 | if (ctxt.meta_ac) |
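The xattr.c hunk above reorders cleanup against the error check: the transaction commit and alloc-context free now run unconditionally before the error is examined, so the early break can no longer leak them. A runnable sketch of the pattern, with stub names standing in for the ocfs2 calls:

    #include <stdio.h>

    static int step;

    static int do_truncate_step(void)    { return ++step < 3 ? 0 : -5; }
    static void commit_transaction(void) { puts("commit"); }
    static void free_alloc_context(void) { puts("free ctx"); }

    int main(void)
    {
        for (;;) {
            int ret = do_truncate_step();  /* stands in for ocfs2_xattr_value_truncate() */

            /* Release per-iteration resources unconditionally first ... */
            commit_transaction();
            free_alloc_context();

            /* ... and only then act on the error, so breaking out of the
             * loop cannot leak the open transaction or alloc context. */
            if (ret < 0) {
                fprintf(stderr, "truncate failed: %d\n", ret);
                break;
            }
        }
        return 0;
    }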
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 586174168e2a..80e4645f7990 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
131 | K(i.freeswap), | 131 | K(i.freeswap), |
132 | K(global_page_state(NR_FILE_DIRTY)), | 132 | K(global_page_state(NR_FILE_DIRTY)), |
133 | K(global_page_state(NR_WRITEBACK)), | 133 | K(global_page_state(NR_WRITEBACK)), |
134 | K(global_page_state(NR_ANON_PAGES) | ||
135 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 134 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
135 | K(global_page_state(NR_ANON_PAGES) | ||
136 | + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * | 136 | + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * |
137 | HPAGE_PMD_NR | 137 | HPAGE_PMD_NR), |
138 | #else | ||
139 | K(global_page_state(NR_ANON_PAGES)), | ||
138 | #endif | 140 | #endif |
139 | ), | ||
140 | K(global_page_state(NR_FILE_MAPPED)), | 141 | K(global_page_state(NR_FILE_MAPPED)), |
141 | K(global_page_state(NR_SHMEM)), | 142 | K(global_page_state(NR_SHMEM)), |
142 | K(global_page_state(NR_SLAB_RECLAIMABLE) + | 143 | K(global_page_state(NR_SLAB_RECLAIMABLE) + |
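The meminfo.c hunk above fixes an #ifdef whose parentheses used to open before the conditional block and close after #endif; each preprocessor branch now contributes one complete, balanced argument. A compilable miniature of the corrected shape (K() and the page counts here are illustrative):

    #include <stdio.h>

    #define K(x) ((x) * 4)   /* pages -> KiB, assuming 4 KiB pages */

    int main(void)
    {
        unsigned long anon = 1000, thp = 2, hpage_pmd_nr = 512;

        printf("AnonPages: %8lu kB\n",
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
               K(anon + thp * hpage_pmd_nr)
    #else
               K(anon)
    #endif
               );
        return 0;
    }

In userspace CONFIG_TRANSPARENT_HUGEPAGE is undefined, so this compiles down the #else branch and prints 4000 kB; either branch leaves the printf() argument list well formed, which is the point of the fix.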
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 8a6ab666e9f8..2527a68057fc 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -31,7 +31,7 @@ static u64 get_idle_time(int cpu) | |||
31 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; | 31 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
32 | idle += arch_idle_time(cpu); | 32 | idle += arch_idle_time(cpu); |
33 | } else | 33 | } else |
34 | idle = usecs_to_cputime(idle_time); | 34 | idle = nsecs_to_jiffies64(1000 * idle_time); |
35 | 35 | ||
36 | return idle; | 36 | return idle; |
37 | } | 37 | } |
@@ -44,7 +44,7 @@ static u64 get_iowait_time(int cpu) | |||
44 | /* !NO_HZ so we can rely on cpustat.iowait */ | 44 | /* !NO_HZ so we can rely on cpustat.iowait */ |
45 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; | 45 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
46 | else | 46 | else |
47 | iowait = usecs_to_cputime(iowait_time); | 47 | iowait = nsecs_to_jiffies64(1000 * iowait_time); |
48 | 48 | ||
49 | return iowait; | 49 | return iowait; |
50 | } | 50 | } |
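The proc/stat hunks above replace usecs_to_cputime() with an explicit conversion: the cpufreq-supplied idle and iowait times are in microseconds, so they are scaled to nanoseconds and then turned into 64-bit jiffies. A userspace check of the arithmetic, assuming HZ divides NSEC_PER_SEC evenly (the kernel's nsecs_to_jiffies64() also handles the uneven cases):

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 100ULL                   /* assumed tick rate for the example */
    #define NSEC_PER_SEC 1000000000ULL

    /* Simplified analogue of nsecs_to_jiffies64(): whole ticks in a span. */
    static uint64_t nsecs_to_jiffies64(uint64_t ns)
    {
        return ns / (NSEC_PER_SEC / HZ);
    }

    int main(void)
    {
        uint64_t idle_usecs = 2500000;  /* 2.5 s of idle time, in microseconds */

        /* Multiply by 1000 first (us -> ns), then convert to jiffies. */
        printf("%llu jiffies\n",
               (unsigned long long)nsecs_to_jiffies64(1000 * idle_usecs));
        return 0;                       /* prints 250 with HZ=100 */
    }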
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 2bd620f0d796..57bbf9078ac8 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi) | |||
167 | } | 167 | } |
168 | 168 | ||
169 | psinfo = psi; | 169 | psinfo = psi; |
170 | mutex_init(&psinfo->read_mutex); | ||
170 | spin_unlock(&pstore_lock); | 171 | spin_unlock(&pstore_lock); |
171 | 172 | ||
172 | if (owner && !try_module_get(owner)) { | 173 | if (owner && !try_module_get(owner)) { |
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register); | |||
195 | void pstore_get_records(int quiet) | 196 | void pstore_get_records(int quiet) |
196 | { | 197 | { |
197 | struct pstore_info *psi = psinfo; | 198 | struct pstore_info *psi = psinfo; |
199 | char *buf = NULL; | ||
198 | ssize_t size; | 200 | ssize_t size; |
199 | u64 id; | 201 | u64 id; |
200 | enum pstore_type_id type; | 202 | enum pstore_type_id type; |
201 | struct timespec time; | 203 | struct timespec time; |
202 | int failed = 0, rc; | 204 | int failed = 0, rc; |
203 | unsigned long flags; | ||
204 | 205 | ||
205 | if (!psi) | 206 | if (!psi) |
206 | return; | 207 | return; |
207 | 208 | ||
208 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 209 | mutex_lock(&psi->read_mutex); |
209 | rc = psi->open(psi); | 210 | rc = psi->open(psi); |
210 | if (rc) | 211 | if (rc) |
211 | goto out; | 212 | goto out; |
212 | 213 | ||
213 | while ((size = psi->read(&id, &type, &time, psi)) > 0) { | 214 | while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) { |
214 | rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size, | 215 | rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size, |
215 | time, psi); | 216 | time, psi); |
217 | kfree(buf); | ||
218 | buf = NULL; | ||
216 | if (rc && (rc != -EEXIST || !quiet)) | 219 | if (rc && (rc != -EEXIST || !quiet)) |
217 | failed++; | 220 | failed++; |
218 | } | 221 | } |
219 | psi->close(psi); | 222 | psi->close(psi); |
220 | out: | 223 | out: |
221 | spin_unlock_irqrestore(&psinfo->buf_lock, flags); | 224 | mutex_unlock(&psi->read_mutex); |
222 | 225 | ||
223 | if (failed) | 226 | if (failed) |
224 | printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", | 227 | printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n", |
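The pstore hunk above changes the read-side contract twice over: the record walk is serialized with a sleepable mutex instead of buf_lock, and the backend now hands back an allocated buffer that the caller must free after pstore_mkfile(). A small userspace analogue of that ownership and locking shape (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <pthread.h>

    static pthread_mutex_t read_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pos;

    /* Backend allocates the record buffer; returns its size, 0 at end. */
    static ssize_t backend_read(char **buf)
    {
        static const char *records[] = { "oops #1", "panic #2" };
        if (pos >= 2)
            return 0;
        *buf = strdup(records[pos++]);
        return *buf ? (ssize_t)strlen(*buf) : -1;
    }

    int main(void)
    {
        char *buf = NULL;
        ssize_t size;

        /* A mutex, not a spinlock, covers the whole open/read/close
         * walk, since the backend may now sleep while allocating. */
        pthread_mutex_lock(&read_mutex);
        while ((size = backend_read(&buf)) > 0) {
            printf("record (%zd bytes): %s\n", size, buf);
            free(buf);     /* caller frees after consuming the record */
            buf = NULL;
        }
        pthread_mutex_unlock(&read_mutex);
        return 0;
    }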
diff --git a/fs/seq_file.c b/fs/seq_file.c index 05d6b0e78c95..dba43c3ea3af 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path); | |||
449 | 449 | ||
450 | /* | 450 | /* |
451 | * Same as seq_path, but relative to supplied root. | 451 | * Same as seq_path, but relative to supplied root. |
452 | * | ||
453 | * root may be changed, see __d_path(). | ||
454 | */ | 452 | */ |
455 | int seq_path_root(struct seq_file *m, struct path *path, struct path *root, | 453 | int seq_path_root(struct seq_file *m, struct path *path, struct path *root, |
456 | char *esc) | 454 | char *esc) |
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, | |||
463 | char *p; | 461 | char *p; |
464 | 462 | ||
465 | p = __d_path(path, root, buf, size); | 463 | p = __d_path(path, root, buf, size); |
464 | if (!p) | ||
465 | return SEQ_SKIP; | ||
466 | res = PTR_ERR(p); | 466 | res = PTR_ERR(p); |
467 | if (!IS_ERR(p)) { | 467 | if (!IS_ERR(p)) { |
468 | char *end = mangle_path(buf, p, esc); | 468 | char *end = mangle_path(buf, p, esc); |
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, | |||
474 | } | 474 | } |
475 | seq_commit(m, res); | 475 | seq_commit(m, res); |
476 | 476 | ||
477 | return res < 0 ? res : 0; | 477 | return res < 0 && res != -ENAMETOOLONG ? res : 0; |
478 | } | 478 | } |
479 | 479 | ||
480 | /* | 480 | /* |
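The seq_file.c hunk above adapts seq_path_root() to the __d_path() contract assumed here: NULL means the path is unreachable from the supplied root (skip the record), an ERR_PTR() is a real failure, and -ENAMETOOLONG is retried by seq_file with a larger buffer, so it must not be reported as an error. A userspace sketch of that triage with stand-in ERR_PTR helpers:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SEQ_SKIP 1
    #define MAX_ERRNO 4095

    /* Userspace stand-ins for the kernel's ERR_PTR() helpers. */
    static int IS_ERR(const void *p)   { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
    static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }

    static int show_path(const char *p)
    {
        long res;

        if (!p)
            return SEQ_SKIP;           /* outside this root: omit the line */
        res = IS_ERR(p) ? PTR_ERR(p) : 0;
        if (!res)
            printf("%s\n", p);         /* the rendered path */
        /* ENAMETOOLONG only means "retry with a bigger buffer": not fatal */
        return res < 0 && res != -ENAMETOOLONG ? (int)res : 0;
    }

    int main(void)
    {
        printf("-> %d\n", show_path("/mnt/data"));                           /* 0 */
        printf("-> %d\n", show_path(NULL));                                  /* SEQ_SKIP */
        printf("-> %d\n", show_path((const char *)(intptr_t)-ENAMETOOLONG)); /* 0 */
        return 0;
    }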
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index b6c4b3795c4a..76e4266d2e7e 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp) | |||
42 | int count, i; | 42 | int count, i; |
43 | 43 | ||
44 | count = be32_to_cpu(aclp->acl_cnt); | 44 | count = be32_to_cpu(aclp->acl_cnt); |
45 | if (count > XFS_ACL_MAX_ENTRIES) | ||
46 | return ERR_PTR(-EFSCORRUPTED); | ||
45 | 47 | ||
46 | acl = posix_acl_alloc(count, GFP_KERNEL); | 48 | acl = posix_acl_alloc(count, GFP_KERNEL); |
47 | if (!acl) | 49 | if (!acl) |
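The xfs_acl.c hunk above is the classic guard for untrusted on-disk input: validate the entry count against the format's hard limit before it sizes an allocation. A userspace sketch, with an illustrative cap and entry size, and EUCLEAN standing in for -EFSCORRUPTED:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <arpa/inet.h>   /* ntohl() as a userspace stand-in for be32_to_cpu() */

    #define ACL_MAX_ENTRIES 25   /* illustrative cap; XFS has XFS_ACL_MAX_ENTRIES */

    struct disk_acl { uint32_t acl_cnt_be; /* entries would follow */ };

    /* A count read from disk is untrusted: check it against the format's
     * hard limit before sizing any allocation with it, so a corrupted
     * image cannot drive a huge or overflowing allocation. */
    static void *acl_from_disk(const struct disk_acl *d)
    {
        uint32_t count = ntohl(d->acl_cnt_be);

        if (count > ACL_MAX_ENTRIES) {
            errno = EUCLEAN;         /* Linux stand-in for -EFSCORRUPTED */
            return NULL;
        }
        return calloc(count ? count : 1, 16 /* assumed entry size */);
    }

    int main(void)
    {
        struct disk_acl bad = { htonl(0xffffffffu) };
        printf("%p\n", acl_from_disk(&bad));   /* (nil): rejected as corrupt */
        return 0;
    }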
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index d4906e7c9787..c1b55e596551 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags) | |||
110 | /* | 110 | /* |
111 | * Query whether the requested number of additional bytes of extended | 111 | * Query whether the requested number of additional bytes of extended |
112 | * attribute space will be able to fit inline. | 112 | * attribute space will be able to fit inline. |
113 | * | ||
113 | * Returns zero if not, else the di_forkoff fork offset to be used in the | 114 | * Returns zero if not, else the di_forkoff fork offset to be used in the |
114 | * literal area for attribute data once the new bytes have been added. | 115 | * literal area for attribute data once the new bytes have been added. |
115 | * | 116 | * |
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) | |||
122 | int offset; | 123 | int offset; |
123 | int minforkoff; /* lower limit on valid forkoff locations */ | 124 | int minforkoff; /* lower limit on valid forkoff locations */ |
124 | int maxforkoff; /* upper limit on valid forkoff locations */ | 125 | int maxforkoff; /* upper limit on valid forkoff locations */ |
125 | int dsize; | 126 | int dsize; |
126 | xfs_mount_t *mp = dp->i_mount; | 127 | xfs_mount_t *mp = dp->i_mount; |
127 | 128 | ||
128 | offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */ | 129 | offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */ |
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) | |||
136 | return (offset >= minforkoff) ? minforkoff : 0; | 137 | return (offset >= minforkoff) ? minforkoff : 0; |
137 | } | 138 | } |
138 | 139 | ||
139 | if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { | 140 | /* |
140 | if (bytes <= XFS_IFORK_ASIZE(dp)) | 141 | * If the requested numbers of bytes is smaller or equal to the |
141 | return dp->i_d.di_forkoff; | 142 | * current attribute fork size we can always proceed. |
143 | * | ||
144 | * Note that if_bytes in the data fork might actually be larger than | ||
145 | * the current data fork size is due to delalloc extents. In that | ||
146 | * case either the extent count will go down when they are converted | ||
147 | * to real extents, or the delalloc conversion will take care of the | ||
148 | * literal area rebalancing. | ||
149 | */ | ||
150 | if (bytes <= XFS_IFORK_ASIZE(dp)) | ||
151 | return dp->i_d.di_forkoff; | ||
152 | |||
153 | /* | ||
154 | * For attr2 we can try to move the forkoff if there is space in the | ||
155 | * literal area, but for the old format we are done if there is no | ||
156 | * space in the fixed attribute fork. | ||
157 | */ | ||
158 | if (!(mp->m_flags & XFS_MOUNT_ATTR2)) | ||
142 | return 0; | 159 | return 0; |
143 | } | ||
144 | 160 | ||
145 | dsize = dp->i_df.if_bytes; | 161 | dsize = dp->i_df.if_bytes; |
146 | 162 | ||
147 | switch (dp->i_d.di_format) { | 163 | switch (dp->i_d.di_format) { |
148 | case XFS_DINODE_FMT_EXTENTS: | 164 | case XFS_DINODE_FMT_EXTENTS: |
149 | /* | 165 | /* |
150 | * If there is no attr fork and the data fork is extents, | 166 | * If there is no attr fork and the data fork is extents, |
151 | * determine if creating the default attr fork will result | 167 | * determine if creating the default attr fork will result |
152 | * in the extents form migrating to btree. If so, the | 168 | * in the extents form migrating to btree. If so, the |
153 | * minimum offset only needs to be the space required for | 169 | * minimum offset only needs to be the space required for |
154 | * the btree root. | 170 | * the btree root. |
155 | */ | 171 | */ |
156 | if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > | 172 | if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > |
157 | xfs_default_attroffset(dp)) | 173 | xfs_default_attroffset(dp)) |
158 | dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); | 174 | dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); |
159 | break; | 175 | break; |
160 | |||
161 | case XFS_DINODE_FMT_BTREE: | 176 | case XFS_DINODE_FMT_BTREE: |
162 | /* | 177 | /* |
163 | * If have data btree then keep forkoff if we have one, | 178 | * If we have a data btree then keep forkoff if we have one, |
164 | * otherwise we are adding a new attr, so then we set | 179 | * otherwise we are adding a new attr, so then we set |
165 | * minforkoff to where the btree root can finish so we have | 180 | * minforkoff to where the btree root can finish so we have |
166 | * plenty of room for attrs | 181 | * plenty of room for attrs |
167 | */ | 182 | */ |
168 | if (dp->i_d.di_forkoff) { | 183 | if (dp->i_d.di_forkoff) { |
169 | if (offset < dp->i_d.di_forkoff) | 184 | if (offset < dp->i_d.di_forkoff) |
170 | return 0; | 185 | return 0; |
171 | else | 186 | return dp->i_d.di_forkoff; |
172 | return dp->i_d.di_forkoff; | 187 | } |
173 | } else | 188 | dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot); |
174 | dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot); | ||
175 | break; | 189 | break; |
176 | } | 190 | } |
177 | 191 | ||
178 | /* | 192 | /* |
179 | * A data fork btree root must have space for at least | 193 | * A data fork btree root must have space for at least |
180 | * MINDBTPTRS key/ptr pairs if the data fork is small or empty. | 194 | * MINDBTPTRS key/ptr pairs if the data fork is small or empty. |
181 | */ | 195 | */ |
182 | minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); | 196 | minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); |
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) | |||
186 | maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); | 200 | maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); |
187 | maxforkoff = maxforkoff >> 3; /* rounded down */ | 201 | maxforkoff = maxforkoff >> 3; /* rounded down */ |
188 | 202 | ||
189 | if (offset >= minforkoff && offset < maxforkoff) | ||
190 | return offset; | ||
191 | if (offset >= maxforkoff) | 203 | if (offset >= maxforkoff) |
192 | return maxforkoff; | 204 | return maxforkoff; |
205 | if (offset >= minforkoff) | ||
206 | return offset; | ||
193 | return 0; | 207 | return 0; |
194 | } | 208 | } |
195 | 209 | ||
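The final attr_leaf.c hunk reorders the bound checks into a plain clamp; testing the upper bound first leaves one comparison per case, with 0 ("does not fit") as the fall-through. A tiny standalone version, with plain ints standing in for the fork-offset units:

    #include <stdio.h>

    static int clamp_forkoff(int offset, int minforkoff, int maxforkoff)
    {
        if (offset >= maxforkoff)
            return maxforkoff;
        if (offset >= minforkoff)
            return offset;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clamp_forkoff(5, 10, 40),    /* 0: below the minimum */
               clamp_forkoff(25, 10, 40),   /* 25: passes through */
               clamp_forkoff(99, 10, 40));  /* 40: clamped to the maximum */
        return 0;
    }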
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index c68baeb0974a..d0ab78837057 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc( | |||
2383 | int tryagain; | 2383 | int tryagain; |
2384 | int error; | 2384 | int error; |
2385 | 2385 | ||
2386 | ASSERT(ap->length); | ||
2387 | |||
2386 | mp = ap->ip->i_mount; | 2388 | mp = ap->ip->i_mount; |
2387 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; | 2389 | align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; |
2388 | if (unlikely(align)) { | 2390 | if (unlikely(align)) { |
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate( | |||
4629 | int error; | 4631 | int error; |
4630 | int rt; | 4632 | int rt; |
4631 | 4633 | ||
4634 | ASSERT(bma->length > 0); | ||
4635 | |||
4632 | rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); | 4636 | rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); |
4633 | 4637 | ||
4634 | /* | 4638 | /* |
@@ -4849,6 +4853,7 @@ xfs_bmapi_write( | |||
4849 | ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); | 4853 | ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); |
4850 | ASSERT(!(flags & XFS_BMAPI_IGSTATE)); | 4854 | ASSERT(!(flags & XFS_BMAPI_IGSTATE)); |
4851 | ASSERT(tp != NULL); | 4855 | ASSERT(tp != NULL); |
4856 | ASSERT(len > 0); | ||
4852 | 4857 | ||
4853 | whichfork = (flags & XFS_BMAPI_ATTRFORK) ? | 4858 | whichfork = (flags & XFS_BMAPI_ATTRFORK) ? |
4854 | XFS_ATTR_FORK : XFS_DATA_FORK; | 4859 | XFS_ATTR_FORK : XFS_DATA_FORK; |
@@ -4918,9 +4923,22 @@ xfs_bmapi_write( | |||
4918 | bma.eof = eof; | 4923 | bma.eof = eof; |
4919 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); | 4924 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); |
4920 | bma.wasdel = wasdelay; | 4925 | bma.wasdel = wasdelay; |
4921 | bma.length = len; | ||
4922 | bma.offset = bno; | 4926 | bma.offset = bno; |
4923 | 4927 | ||
4928 | /* | ||
4929 | * There's a 32/64 bit type mismatch between the | ||
4930 | * allocation length request (which can be 64 bits in | ||
4931 | * length) and the bma length request, which is | ||
4932 | * xfs_extlen_t and therefore 32 bits. Hence we have to | ||
4933 | * check for 32-bit overflows and handle them here. | ||
4934 | */ | ||
4935 | if (len > (xfs_filblks_t)MAXEXTLEN) | ||
4936 | bma.length = MAXEXTLEN; | ||
4937 | else | ||
4938 | bma.length = len; | ||
4939 | |||
4940 | ASSERT(len > 0); | ||
4941 | ASSERT(bma.length > 0); | ||
4924 | error = xfs_bmapi_allocate(&bma, flags); | 4942 | error = xfs_bmapi_allocate(&bma, flags); |
4925 | if (error) | 4943 | if (error) |
4926 | goto error0; | 4944 | goto error0; |
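The xfs_bmapi_write() hunk above documents a 32/64-bit mismatch: the request length is a 64-bit xfs_filblks_t, but one allocation is described by a 32-bit xfs_extlen_t, so each pass clamps the request to MAXEXTLEN and the surrounding loop picks up the remainder. The arithmetic in miniature:

    #include <stdio.h>
    #include <stdint.h>

    #define MAXEXTLEN ((uint32_t)0x001fffff)   /* XFS's 21-bit extent length limit */

    int main(void)
    {
        uint64_t len = (1ULL << 32) + 100;     /* > 4G blocks requested */
        uint32_t bma_length;

        /* Clamp the 64-bit request to what one 32-bit extent can hold;
         * without this, the narrowing assignment silently truncates. */
        bma_length = len > (uint64_t)MAXEXTLEN ? MAXEXTLEN : (uint32_t)len;
        printf("request %llu -> this allocation %u blocks\n",
               (unsigned long long)len, bma_length);
        return 0;
    }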
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index da108977b21f..558910f5e3c0 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c | |||
@@ -98,22 +98,22 @@ xfs_fs_encode_fh( | |||
98 | switch (fileid_type) { | 98 | switch (fileid_type) { |
99 | case FILEID_INO32_GEN_PARENT: | 99 | case FILEID_INO32_GEN_PARENT: |
100 | spin_lock(&dentry->d_lock); | 100 | spin_lock(&dentry->d_lock); |
101 | fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino; | 101 | fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; |
102 | fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation; | 102 | fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation; |
103 | spin_unlock(&dentry->d_lock); | 103 | spin_unlock(&dentry->d_lock); |
104 | /*FALLTHRU*/ | 104 | /*FALLTHRU*/ |
105 | case FILEID_INO32_GEN: | 105 | case FILEID_INO32_GEN: |
106 | fid->i32.ino = inode->i_ino; | 106 | fid->i32.ino = XFS_I(inode)->i_ino; |
107 | fid->i32.gen = inode->i_generation; | 107 | fid->i32.gen = inode->i_generation; |
108 | break; | 108 | break; |
109 | case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: | 109 | case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: |
110 | spin_lock(&dentry->d_lock); | 110 | spin_lock(&dentry->d_lock); |
111 | fid64->parent_ino = dentry->d_parent->d_inode->i_ino; | 111 | fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; |
112 | fid64->parent_gen = dentry->d_parent->d_inode->i_generation; | 112 | fid64->parent_gen = dentry->d_parent->d_inode->i_generation; |
113 | spin_unlock(&dentry->d_lock); | 113 | spin_unlock(&dentry->d_lock); |
114 | /*FALLTHRU*/ | 114 | /*FALLTHRU*/ |
115 | case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: | 115 | case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: |
116 | fid64->ino = inode->i_ino; | 116 | fid64->ino = XFS_I(inode)->i_ino; |
117 | fid64->gen = inode->i_generation; | 117 | fid64->gen = inode->i_generation; |
118 | break; | 118 | break; |
119 | } | 119 | } |
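The xfs_export.c hunk above switches the handle encoding from the VFS i_ino to the XFS-native inode number: on a 32-bit kernel, i_ino is an unsigned long and silently truncates a 64-bit XFS inode number. A two-line demonstration of the truncation, using a 32-bit stand-in for that unsigned long:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t xfs_ino = 0x100000002ULL;     /* real 64-bit inode number */
        uint32_t vfs_ino = (uint32_t)xfs_ino;  /* what i_ino would hold on 32-bit */

        printf("fs-native: %llx, truncated VFS copy: %x\n",
               (unsigned long long)xfs_ino, vfs_ino);
        return 0;
    }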
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c0237c602f11..755ee8164880 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -2835,6 +2835,27 @@ corrupt_out: | |||
2835 | return XFS_ERROR(EFSCORRUPTED); | 2835 | return XFS_ERROR(EFSCORRUPTED); |
2836 | } | 2836 | } |
2837 | 2837 | ||
2838 | void | ||
2839 | xfs_promote_inode( | ||
2840 | struct xfs_inode *ip) | ||
2841 | { | ||
2842 | struct xfs_buf *bp; | ||
2843 | |||
2844 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | ||
2845 | |||
2846 | bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno, | ||
2847 | ip->i_imap.im_len, XBF_TRYLOCK); | ||
2848 | if (!bp) | ||
2849 | return; | ||
2850 | |||
2851 | if (XFS_BUF_ISDELAYWRITE(bp)) { | ||
2852 | xfs_buf_delwri_promote(bp); | ||
2853 | wake_up_process(ip->i_mount->m_ddev_targp->bt_task); | ||
2854 | } | ||
2855 | |||
2856 | xfs_buf_relse(bp); | ||
2857 | } | ||
2858 | |||
2838 | /* | 2859 | /* |
2839 | * Return a pointer to the extent record at file index idx. | 2860 | * Return a pointer to the extent record at file index idx. |
2840 | */ | 2861 | */ |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 760140d1dd66..b4cd4739f98e 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); | |||
498 | void xfs_iext_realloc(xfs_inode_t *, int, int); | 498 | void xfs_iext_realloc(xfs_inode_t *, int, int); |
499 | void xfs_iunpin_wait(xfs_inode_t *); | 499 | void xfs_iunpin_wait(xfs_inode_t *); |
500 | int xfs_iflush(xfs_inode_t *, uint); | 500 | int xfs_iflush(xfs_inode_t *, uint); |
501 | void xfs_promote_inode(struct xfs_inode *); | ||
501 | void xfs_lock_inodes(xfs_inode_t **, int, uint); | 502 | void xfs_lock_inodes(xfs_inode_t **, int, uint); |
502 | void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); | 503 | void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); |
503 | 504 | ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a14cd89fe465..34817adf4b9e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -150,6 +150,117 @@ xlog_grant_add_space( | |||
150 | } while (head_val != old); | 150 | } while (head_val != old); |
151 | } | 151 | } |
152 | 152 | ||
153 | STATIC bool | ||
154 | xlog_reserveq_wake( | ||
155 | struct log *log, | ||
156 | int *free_bytes) | ||
157 | { | ||
158 | struct xlog_ticket *tic; | ||
159 | int need_bytes; | ||
160 | |||
161 | list_for_each_entry(tic, &log->l_reserveq, t_queue) { | ||
162 | if (tic->t_flags & XLOG_TIC_PERM_RESERV) | ||
163 | need_bytes = tic->t_unit_res * tic->t_cnt; | ||
164 | else | ||
165 | need_bytes = tic->t_unit_res; | ||
166 | |||
167 | if (*free_bytes < need_bytes) | ||
168 | return false; | ||
169 | *free_bytes -= need_bytes; | ||
170 | |||
171 | trace_xfs_log_grant_wake_up(log, tic); | ||
172 | wake_up(&tic->t_wait); | ||
173 | } | ||
174 | |||
175 | return true; | ||
176 | } | ||
177 | |||
178 | STATIC bool | ||
179 | xlog_writeq_wake( | ||
180 | struct log *log, | ||
181 | int *free_bytes) | ||
182 | { | ||
183 | struct xlog_ticket *tic; | ||
184 | int need_bytes; | ||
185 | |||
186 | list_for_each_entry(tic, &log->l_writeq, t_queue) { | ||
187 | ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); | ||
188 | |||
189 | need_bytes = tic->t_unit_res; | ||
190 | |||
191 | if (*free_bytes < need_bytes) | ||
192 | return false; | ||
193 | *free_bytes -= need_bytes; | ||
194 | |||
195 | trace_xfs_log_regrant_write_wake_up(log, tic); | ||
196 | wake_up(&tic->t_wait); | ||
197 | } | ||
198 | |||
199 | return true; | ||
200 | } | ||
201 | |||
202 | STATIC int | ||
203 | xlog_reserveq_wait( | ||
204 | struct log *log, | ||
205 | struct xlog_ticket *tic, | ||
206 | int need_bytes) | ||
207 | { | ||
208 | list_add_tail(&tic->t_queue, &log->l_reserveq); | ||
209 | |||
210 | do { | ||
211 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
212 | goto shutdown; | ||
213 | xlog_grant_push_ail(log, need_bytes); | ||
214 | |||
215 | XFS_STATS_INC(xs_sleep_logspace); | ||
216 | trace_xfs_log_grant_sleep(log, tic); | ||
217 | |||
218 | xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); | ||
219 | trace_xfs_log_grant_wake(log, tic); | ||
220 | |||
221 | spin_lock(&log->l_grant_reserve_lock); | ||
222 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
223 | goto shutdown; | ||
224 | } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes); | ||
225 | |||
226 | list_del_init(&tic->t_queue); | ||
227 | return 0; | ||
228 | shutdown: | ||
229 | list_del_init(&tic->t_queue); | ||
230 | return XFS_ERROR(EIO); | ||
231 | } | ||
232 | |||
233 | STATIC int | ||
234 | xlog_writeq_wait( | ||
235 | struct log *log, | ||
236 | struct xlog_ticket *tic, | ||
237 | int need_bytes) | ||
238 | { | ||
239 | list_add_tail(&tic->t_queue, &log->l_writeq); | ||
240 | |||
241 | do { | ||
242 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
243 | goto shutdown; | ||
244 | xlog_grant_push_ail(log, need_bytes); | ||
245 | |||
246 | XFS_STATS_INC(xs_sleep_logspace); | ||
247 | trace_xfs_log_regrant_write_sleep(log, tic); | ||
248 | |||
249 | xlog_wait(&tic->t_wait, &log->l_grant_write_lock); | ||
250 | trace_xfs_log_regrant_write_wake(log, tic); | ||
251 | |||
252 | spin_lock(&log->l_grant_write_lock); | ||
253 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
254 | goto shutdown; | ||
255 | } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes); | ||
256 | |||
257 | list_del_init(&tic->t_queue); | ||
258 | return 0; | ||
259 | shutdown: | ||
260 | list_del_init(&tic->t_queue); | ||
261 | return XFS_ERROR(EIO); | ||
262 | } | ||
263 | |||
153 | static void | 264 | static void |
154 | xlog_tic_reset_res(xlog_ticket_t *tic) | 265 | xlog_tic_reset_res(xlog_ticket_t *tic) |
155 | { | 266 | { |
@@ -350,8 +461,19 @@ xfs_log_reserve( | |||
350 | retval = xlog_grant_log_space(log, internal_ticket); | 461 | retval = xlog_grant_log_space(log, internal_ticket); |
351 | } | 462 | } |
352 | 463 | ||
464 | if (unlikely(retval)) { | ||
465 | /* | ||
466 | * If we are failing, make sure the ticket doesn't have any | ||
467 | * current reservations. We don't want to add this back | ||
468 | * when the ticket/ transaction gets cancelled. | ||
469 | */ | ||
470 | internal_ticket->t_curr_res = 0; | ||
471 | /* ungrant will give back unit_res * t_cnt. */ | ||
472 | internal_ticket->t_cnt = 0; | ||
473 | } | ||
474 | |||
353 | return retval; | 475 | return retval; |
354 | } /* xfs_log_reserve */ | 476 | } |
355 | 477 | ||
356 | 478 | ||
357 | /* | 479 | /* |
@@ -2481,8 +2603,8 @@ restart: | |||
2481 | /* | 2603 | /* |
2482 | * Atomically get the log space required for a log ticket. | 2604 | * Atomically get the log space required for a log ticket. |
2483 | * | 2605 | * |
2484 | * Once a ticket gets put onto the reserveq, it will only return after | 2606 | * Once a ticket gets put onto the reserveq, it will only return after the |
2485 | * the needed reservation is satisfied. | 2607 | * needed reservation is satisfied. |
2486 | * | 2608 | * |
2487 | * This function is structured so that it has a lock free fast path. This is | 2609 | * This function is structured so that it has a lock free fast path. This is |
2488 | * necessary because every new transaction reservation will come through this | 2610 | * necessary because every new transaction reservation will come through this |
@@ -2490,113 +2612,53 @@ restart: | |||
2490 | * every pass. | 2612 | * every pass. |
2491 | * | 2613 | * |
2492 | * As tickets are only ever moved on and off the reserveq under the | 2614 | * As tickets are only ever moved on and off the reserveq under the |
2493 | * l_grant_reserve_lock, we only need to take that lock if we are going | 2615 | * l_grant_reserve_lock, we only need to take that lock if we are going to add |
2494 | * to add the ticket to the queue and sleep. We can avoid taking the lock if the | 2616 | * the ticket to the queue and sleep. We can avoid taking the lock if the ticket |
2495 | * ticket was never added to the reserveq because the t_queue list head will be | 2617 | * was never added to the reserveq because the t_queue list head will be empty |
2496 | * empty and we hold the only reference to it so it can safely be checked | 2618 | * and we hold the only reference to it so it can safely be checked unlocked. |
2497 | * unlocked. | ||
2498 | */ | 2619 | */ |
2499 | STATIC int | 2620 | STATIC int |
2500 | xlog_grant_log_space(xlog_t *log, | 2621 | xlog_grant_log_space( |
2501 | xlog_ticket_t *tic) | 2622 | struct log *log, |
2623 | struct xlog_ticket *tic) | ||
2502 | { | 2624 | { |
2503 | int free_bytes; | 2625 | int free_bytes, need_bytes; |
2504 | int need_bytes; | 2626 | int error = 0; |
2505 | 2627 | ||
2506 | #ifdef DEBUG | 2628 | ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); |
2507 | if (log->l_flags & XLOG_ACTIVE_RECOVERY) | ||
2508 | panic("grant Recovery problem"); | ||
2509 | #endif | ||
2510 | 2629 | ||
2511 | trace_xfs_log_grant_enter(log, tic); | 2630 | trace_xfs_log_grant_enter(log, tic); |
2512 | 2631 | ||
2632 | /* | ||
2633 | * If there are other waiters on the queue then give them a chance at | ||
2634 | * logspace before us. Wake up the first waiters, if we do not wake | ||
2635 | * up all the waiters then go to sleep waiting for more free space, | ||
2636 | * otherwise try to get some space for this transaction. | ||
2637 | */ | ||
2513 | need_bytes = tic->t_unit_res; | 2638 | need_bytes = tic->t_unit_res; |
2514 | if (tic->t_flags & XFS_LOG_PERM_RESERV) | 2639 | if (tic->t_flags & XFS_LOG_PERM_RESERV) |
2515 | need_bytes *= tic->t_ocnt; | 2640 | need_bytes *= tic->t_ocnt; |
2516 | |||
2517 | /* something is already sleeping; insert new transaction at end */ | ||
2518 | if (!list_empty_careful(&log->l_reserveq)) { | ||
2519 | spin_lock(&log->l_grant_reserve_lock); | ||
2520 | /* recheck the queue now we are locked */ | ||
2521 | if (list_empty(&log->l_reserveq)) { | ||
2522 | spin_unlock(&log->l_grant_reserve_lock); | ||
2523 | goto redo; | ||
2524 | } | ||
2525 | list_add_tail(&tic->t_queue, &log->l_reserveq); | ||
2526 | |||
2527 | trace_xfs_log_grant_sleep1(log, tic); | ||
2528 | |||
2529 | /* | ||
2530 | * Gotta check this before going to sleep, while we're | ||
2531 | * holding the grant lock. | ||
2532 | */ | ||
2533 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
2534 | goto error_return; | ||
2535 | |||
2536 | XFS_STATS_INC(xs_sleep_logspace); | ||
2537 | xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); | ||
2538 | |||
2539 | /* | ||
2540 | * If we got an error, and the filesystem is shutting down, | ||
2541 | * we'll catch it down below. So just continue... | ||
2542 | */ | ||
2543 | trace_xfs_log_grant_wake1(log, tic); | ||
2544 | } | ||
2545 | |||
2546 | redo: | ||
2547 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
2548 | goto error_return_unlocked; | ||
2549 | |||
2550 | free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); | 2641 | free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); |
2551 | if (free_bytes < need_bytes) { | 2642 | if (!list_empty_careful(&log->l_reserveq)) { |
2552 | spin_lock(&log->l_grant_reserve_lock); | 2643 | spin_lock(&log->l_grant_reserve_lock); |
2553 | if (list_empty(&tic->t_queue)) | 2644 | if (!xlog_reserveq_wake(log, &free_bytes) || |
2554 | list_add_tail(&tic->t_queue, &log->l_reserveq); | 2645 | free_bytes < need_bytes) |
2555 | 2646 | error = xlog_reserveq_wait(log, tic, need_bytes); | |
2556 | trace_xfs_log_grant_sleep2(log, tic); | 2647 | spin_unlock(&log->l_grant_reserve_lock); |
2557 | 2648 | } else if (free_bytes < need_bytes) { | |
2558 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
2559 | goto error_return; | ||
2560 | |||
2561 | xlog_grant_push_ail(log, need_bytes); | ||
2562 | |||
2563 | XFS_STATS_INC(xs_sleep_logspace); | ||
2564 | xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); | ||
2565 | |||
2566 | trace_xfs_log_grant_wake2(log, tic); | ||
2567 | goto redo; | ||
2568 | } | ||
2569 | |||
2570 | if (!list_empty(&tic->t_queue)) { | ||
2571 | spin_lock(&log->l_grant_reserve_lock); | 2649 | spin_lock(&log->l_grant_reserve_lock); |
2572 | list_del_init(&tic->t_queue); | 2650 | error = xlog_reserveq_wait(log, tic, need_bytes); |
2573 | spin_unlock(&log->l_grant_reserve_lock); | 2651 | spin_unlock(&log->l_grant_reserve_lock); |
2574 | } | 2652 | } |
2653 | if (error) | ||
2654 | return error; | ||
2575 | 2655 | ||
2576 | /* we've got enough space */ | ||
2577 | xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); | 2656 | xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); |
2578 | xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); | 2657 | xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); |
2579 | trace_xfs_log_grant_exit(log, tic); | 2658 | trace_xfs_log_grant_exit(log, tic); |
2580 | xlog_verify_grant_tail(log); | 2659 | xlog_verify_grant_tail(log); |
2581 | return 0; | 2660 | return 0; |
2582 | 2661 | } | |
2583 | error_return_unlocked: | ||
2584 | spin_lock(&log->l_grant_reserve_lock); | ||
2585 | error_return: | ||
2586 | list_del_init(&tic->t_queue); | ||
2587 | spin_unlock(&log->l_grant_reserve_lock); | ||
2588 | trace_xfs_log_grant_error(log, tic); | ||
2589 | |||
2590 | /* | ||
2591 | * If we are failing, make sure the ticket doesn't have any | ||
2592 | * current reservations. We don't want to add this back when | ||
2593 | * the ticket/transaction gets cancelled. | ||
2594 | */ | ||
2595 | tic->t_curr_res = 0; | ||
2596 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ | ||
2597 | return XFS_ERROR(EIO); | ||
2598 | } /* xlog_grant_log_space */ | ||
2599 | |||
2600 | 2662 | ||
2601 | /* | 2663 | /* |
2602 | * Replenish the byte reservation required by moving the grant write head. | 2664 | * Replenish the byte reservation required by moving the grant write head. |
@@ -2605,10 +2667,12 @@ error_return: | |||
2605 | * free fast path. | 2667 | * free fast path. |
2606 | */ | 2668 | */ |
2607 | STATIC int | 2669 | STATIC int |
2608 | xlog_regrant_write_log_space(xlog_t *log, | 2670 | xlog_regrant_write_log_space( |
2609 | xlog_ticket_t *tic) | 2671 | struct log *log, |
2672 | struct xlog_ticket *tic) | ||
2610 | { | 2673 | { |
2611 | int free_bytes, need_bytes; | 2674 | int free_bytes, need_bytes; |
2675 | int error = 0; | ||
2612 | 2676 | ||
2613 | tic->t_curr_res = tic->t_unit_res; | 2677 | tic->t_curr_res = tic->t_unit_res; |
2614 | xlog_tic_reset_res(tic); | 2678 | xlog_tic_reset_res(tic); |
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log, | |||
2616 | if (tic->t_cnt > 0) | 2680 | if (tic->t_cnt > 0) |
2617 | return 0; | 2681 | return 0; |
2618 | 2682 | ||
2619 | #ifdef DEBUG | 2683 | ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); |
2620 | if (log->l_flags & XLOG_ACTIVE_RECOVERY) | ||
2621 | panic("regrant Recovery problem"); | ||
2622 | #endif | ||
2623 | 2684 | ||
2624 | trace_xfs_log_regrant_write_enter(log, tic); | 2685 | trace_xfs_log_regrant_write_enter(log, tic); |
2625 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
2626 | goto error_return_unlocked; | ||
2627 | 2686 | ||
2628 | /* If there are other waiters on the queue then give them a | 2687 | /* |
2629 | * chance at logspace before us. Wake up the first waiters, | 2689 | * logspace before us. Wake up the first waiters; if we do not wake |
2630 | * if we do not wake up all the waiters then go to sleep waiting | 2690 | * up all the waiters, go to sleep waiting for more free space; |
2631 | * for more free space, otherwise try to get some space for | 2691 | * otherwise try to get some space for this transaction. |
2632 | * this transaction. | 2691 | * otherwise try to get some space for this transaction. |
2633 | */ | 2692 | */ |
2634 | need_bytes = tic->t_unit_res; | 2693 | need_bytes = tic->t_unit_res; |
2635 | if (!list_empty_careful(&log->l_writeq)) { | ||
2636 | struct xlog_ticket *ntic; | ||
2637 | |||
2638 | spin_lock(&log->l_grant_write_lock); | ||
2639 | free_bytes = xlog_space_left(log, &log->l_grant_write_head); | ||
2640 | list_for_each_entry(ntic, &log->l_writeq, t_queue) { | ||
2641 | ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); | ||
2642 | |||
2643 | if (free_bytes < ntic->t_unit_res) | ||
2644 | break; | ||
2645 | free_bytes -= ntic->t_unit_res; | ||
2646 | wake_up(&ntic->t_wait); | ||
2647 | } | ||
2648 | |||
2649 | if (ntic != list_first_entry(&log->l_writeq, | ||
2650 | struct xlog_ticket, t_queue)) { | ||
2651 | if (list_empty(&tic->t_queue)) | ||
2652 | list_add_tail(&tic->t_queue, &log->l_writeq); | ||
2653 | trace_xfs_log_regrant_write_sleep1(log, tic); | ||
2654 | |||
2655 | xlog_grant_push_ail(log, need_bytes); | ||
2656 | |||
2657 | XFS_STATS_INC(xs_sleep_logspace); | ||
2658 | xlog_wait(&tic->t_wait, &log->l_grant_write_lock); | ||
2659 | trace_xfs_log_regrant_write_wake1(log, tic); | ||
2660 | } else | ||
2661 | spin_unlock(&log->l_grant_write_lock); | ||
2662 | } | ||
2663 | |||
2664 | redo: | ||
2665 | if (XLOG_FORCED_SHUTDOWN(log)) | ||
2666 | goto error_return_unlocked; | ||
2667 | |||
2668 | free_bytes = xlog_space_left(log, &log->l_grant_write_head); | 2694 | free_bytes = xlog_space_left(log, &log->l_grant_write_head); |
2669 | if (free_bytes < need_bytes) { | 2695 | if (!list_empty_careful(&log->l_writeq)) { |
2670 | spin_lock(&log->l_grant_write_lock); | 2696 | spin_lock(&log->l_grant_write_lock); |
2671 | if (list_empty(&tic->t_queue)) | 2697 | if (!xlog_writeq_wake(log, &free_bytes) || |
2672 | list_add_tail(&tic->t_queue, &log->l_writeq); | 2698 | free_bytes < need_bytes) |
2673 | 2699 | error = xlog_writeq_wait(log, tic, need_bytes); | |
2674 | if (XLOG_FORCED_SHUTDOWN(log)) | 2700 | spin_unlock(&log->l_grant_write_lock); |
2675 | goto error_return; | 2701 | } else if (free_bytes < need_bytes) { |
2676 | |||
2677 | xlog_grant_push_ail(log, need_bytes); | ||
2678 | |||
2679 | XFS_STATS_INC(xs_sleep_logspace); | ||
2680 | trace_xfs_log_regrant_write_sleep2(log, tic); | ||
2681 | xlog_wait(&tic->t_wait, &log->l_grant_write_lock); | ||
2682 | |||
2683 | trace_xfs_log_regrant_write_wake2(log, tic); | ||
2684 | goto redo; | ||
2685 | } | ||
2686 | |||
2687 | if (!list_empty(&tic->t_queue)) { | ||
2688 | spin_lock(&log->l_grant_write_lock); | 2702 | spin_lock(&log->l_grant_write_lock); |
2689 | list_del_init(&tic->t_queue); | 2703 | error = xlog_writeq_wait(log, tic, need_bytes); |
2690 | spin_unlock(&log->l_grant_write_lock); | 2704 | spin_unlock(&log->l_grant_write_lock); |
2691 | } | 2705 | } |
2692 | 2706 | ||
2693 | /* we've got enough space */ | 2707 | if (error) |
2708 | return error; | ||
2709 | |||
2694 | xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); | 2710 | xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); |
2695 | trace_xfs_log_regrant_write_exit(log, tic); | 2711 | trace_xfs_log_regrant_write_exit(log, tic); |
2696 | xlog_verify_grant_tail(log); | 2712 | xlog_verify_grant_tail(log); |
2697 | return 0; | 2713 | return 0; |
2698 | 2714 | } | |
2699 | |||
2700 | error_return_unlocked: | ||
2701 | spin_lock(&log->l_grant_write_lock); | ||
2702 | error_return: | ||
2703 | list_del_init(&tic->t_queue); | ||
2704 | spin_unlock(&log->l_grant_write_lock); | ||
2705 | trace_xfs_log_regrant_write_error(log, tic); | ||
2706 | |||
2707 | /* | ||
2708 | * If we are failing, make sure the ticket doesn't have any | ||
2709 | * current reservations. We don't want to add this back when | ||
2710 | * the ticket/transaction gets cancelled. | ||
2711 | */ | ||
2712 | tic->t_curr_res = 0; | ||
2713 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ | ||
2714 | return XFS_ERROR(EIO); | ||
2715 | } /* xlog_regrant_write_log_space */ | ||
2716 | |||
2717 | 2715 | ||
2718 | /* The first cnt-1 times through here we don't need to | 2716 | /* The first cnt-1 times through here we don't need to |
2719 | * move the grant write head because the permanent | 2717 | * move the grant write head because the permanent |
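The log.c rework above factors both grant paths into the same shape: walk the queue waking earlier waiters while free space lasts, and only enqueue and sleep when the queue is non-empty or space runs short. A single-threaded simulation of that fast path (the real code sleeps under l_grant_*_lock; names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static int queued[] = { 30, 50 };   /* bytes needed by two earlier waiters */
    static int nqueued = 2, qhead;

    /* Hand out space to queued tickets in FIFO order; report whether the
     * queue was fully drained, as xlog_reserveq_wake() does. */
    static bool wake_waiters(int *avail)
    {
        while (qhead < nqueued) {
            if (*avail < queued[qhead])
                return false;           /* first starved waiter stops the walk */
            *avail -= queued[qhead++];
            printf("woke waiter needing %d\n", queued[qhead - 1]);
        }
        return true;
    }

    int main(void)
    {
        int need_bytes = 40, avail = 100;

        if (qhead < nqueued) {          /* queue non-empty: queued work first */
            if (!wake_waiters(&avail) || avail < need_bytes) {
                puts("would enqueue and sleep");
                return 1;
            }
        } else if (avail < need_bytes) {
            puts("would enqueue and sleep");
            return 1;
        }
        avail -= need_bytes;
        printf("granted %d, %d left\n", need_bytes, avail);
        return 0;
    }

With 100 bytes free, both waiters (80 bytes) are woken and the new 40-byte request correctly queues behind them, the fairness property the refactored helpers preserve.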
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 5cff443f6cdb..0bbb1a41998b 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -674,7 +674,8 @@ xfs_qm_dqattach_one( | |||
674 | * disk and we didn't ask it to allocate; | 674 | * disk and we didn't ask it to allocate; |
675 | * ESRCH if quotas got turned off suddenly. | 675 | * ESRCH if quotas got turned off suddenly. |
676 | */ | 676 | */ |
677 | error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp); | 677 | error = xfs_qm_dqget(ip->i_mount, ip, id, type, |
678 | doalloc | XFS_QMOPT_DOWARN, &dqp); | ||
678 | if (error) | 679 | if (error) |
679 | return error; | 680 | return error; |
680 | 681 | ||
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index aa3dc1a4d53d..be5c51d8f757 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c | |||
@@ -770,6 +770,17 @@ restart: | |||
770 | if (!xfs_iflock_nowait(ip)) { | 770 | if (!xfs_iflock_nowait(ip)) { |
771 | if (!(sync_mode & SYNC_WAIT)) | 771 | if (!(sync_mode & SYNC_WAIT)) |
772 | goto out; | 772 | goto out; |
773 | |||
774 | /* | ||
775 | * If we only have a single dirty inode in a cluster there is | ||
776 | * a fair chance that the AIL push may have pushed it into | ||
777 | * the buffer, but xfsbufd won't touch it until 30 seconds | ||
778 | * from now, and thus we will lock up here. | ||
779 | * | ||
780 | * Promote the inode buffer to the front of the delwri list | ||
781 | * and wake up xfsbufd now. | ||
782 | */ | ||
783 | xfs_promote_inode(ip); | ||
773 | xfs_iflock(ip); | 784 | xfs_iflock(ip); |
774 | } | 785 | } |
775 | 786 | ||
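The xfs_sync.c comment above explains the window being closed: with SYNC_WAIT set, blocking on the flush lock could stall for xfsbufd's full delay, so xfs_promote_inode() nudges the inode's cluster buffer to the front of the delwri list first. A toy analogue of the promotion step (struct and names invented for illustration):

    #include <stdio.h>

    /* A buffer sitting in a delayed-write list is moved to the head so
     * the flusher picks it up on its next pass instead of after the
     * full delay. */
    struct buf { const char *name; int queue_pos; };

    static void delwri_promote(struct buf *b)
    {
        b->queue_pos = 0;   /* front of the flusher's list */
    }

    int main(void)
    {
        struct buf inode_cluster = { "inode cluster", 7 };

        /* xfs_iflock_nowait() failed and SYNC_WAIT is set: promote the
         * buffer and wake the flusher before blocking on the lock. */
        delwri_promote(&inode_cluster);
        printf("%s now at position %d\n", inode_cluster.name,
               inode_cluster.queue_pos);
        return 0;
    }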
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index f1d2802b2f07..494035798873 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); | |||
834 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); | 834 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); |
835 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); | 835 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); |
836 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); | 836 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); |
837 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); | 837 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep); |
838 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); | 838 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake); |
839 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); | ||
840 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); | ||
841 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); | 839 | DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); |
842 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); | 840 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); |
843 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); | 841 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); |
844 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); | 842 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); |
845 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); | 843 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep); |
846 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); | 844 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake); |
847 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); | ||
848 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); | ||
849 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); | 845 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); |
850 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); | 846 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); |
851 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); | 847 | DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); |