Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--   drivers/md/raid10.c   27
1 file changed, 13 insertions, 14 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2da83d566592..8e9462626ec5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-	blk_flush_plug(current);
-	md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	/* block any new IO from starting */
 	conf->barrier++;
 
-	/* No wait for all pending IO to complete */
+	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    md_kick_device(conf->mddev));
+				    );
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
 	wait_event_lock_irq(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
-			    ({ flush_pending_writes(conf);
-			       md_kick_device(conf->mddev); }));
+			    flush_pending_writes(conf));
+
 	spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
+	int plugged;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	 * inc refcount on their rdev. Record them by setting
 	 * bios[x] to bio
 	 */
+	plugged = mddev_check_plugged(mddev);
+
 	raid10_find_phys(conf, r10_bio);
  retry_write:
 	blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync || !mddev->bitmap)
+	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-
 	return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	mdk_rdev_t *rdev;
+	struct blk_plug plug;
 
 	md_check_recovery(mddev);
 
+	blk_start_plug(&plug);
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
 		}
 		cond_resched();
 	}
+	blk_finish_plug(&plug);
 }
 
 
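As background (not part of the commit above), here is a minimal sketch of the per-task plugging pattern this patch moves raid10 onto: blk_start_plug() makes block requests submitted by the current task accumulate on an on-stack plug, and blk_finish_plug() flushes the accumulated batch to the underlying devices, which is what raid10d now does around its retry loop. The example_submit_batch() helper and its arguments are hypothetical; only the blk_start_plug()/blk_finish_plug()/generic_make_request() calls are existing kernel API of that era.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper: submit a batch of bios behind one on-stack plug,
 * mirroring the blk_start_plug()/blk_finish_plug() usage added to raid10d. */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests from this task are now batched */
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);		/* flush the batched requests to the devices */
}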