author		Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
commit		7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree		33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /drivers/md/raid5.c
parent		73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
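
For reference: the on-stack plugging this commit converts to comes from the parent commit (73c101011926). Instead of per-queue unplug callbacks, a submitter batches requests in a plug held on its own stack. Below is a minimal sketch of that pattern, assuming the 2.6.39-era block API (blk_start_plug()/blk_finish_plug(), and the submit_bio(rw, bio) signature of the time); the submit_batch() helper is hypothetical, not part of this commit.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper: issue a batch of bios under one on-stack plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests queue up on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flush the batch to the device(s) */
}

A task that is about to sleep can flush its own pending plug with blk_flush_plug(current), which is what the converted raid5 call sites below rely on.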
Diffstat (limited to 'drivers/md/raid5.c')
 drivers/md/raid5.c | 62
 1 file changed, 8 insertions(+), 54 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..e867ee42b152 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
 	return 0;
 }
 
-static void unplug_slaves(mddev_t *mddev);
-
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
 		  int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 				     < (conf->max_nr_stripes *3/4)
 				     || !conf->inactive_blocked),
 				    conf->device_lock,
-				    md_raid5_unplug_device(conf)
-				);
+				    md_raid5_kick_device(conf));
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
 				    conf->device_lock,
-				    unplug_slaves(conf->mddev)
-			);
+				    blk_flush_plug(current));
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
 	}
 }
 
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
 {
-	raid5_conf_t *conf = mddev->private;
-	int i;
-	int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
-	rcu_read_lock();
-	for (i = 0; i < devs; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&conf->device_lock, flags);
-
-	if (plugger_remove_plug(&conf->plug)) {
-		conf->seq_flush++;
-		raid5_activate_delayed(conf);
-	}
+	blk_flush_plug(current);
+	raid5_activate_delayed(conf);
 	md_wakeup_thread(conf->mddev->thread);
-
-	spin_unlock_irqrestore(&conf->device_lock, flags);
-
-	unplug_slaves(conf->mddev);
 }
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
 
 static void raid5_unplug(struct plug_handle *plug)
 {
 	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-	md_raid5_unplug_device(conf);
-}
 
-static void raid5_unplug_queue(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	md_raid5_unplug_device(mddev->private);
+	md_raid5_kick_device(conf);
 }
 
 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 				 * add failed due to overlap. Flush everything
 				 * and wait a while
 				 */
-				md_raid5_unplug_device(conf);
+				md_raid5_kick_device(conf);
 				release_stripe(sh);
 				schedule();
 				goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
-		unplug_slaves(mddev);
 
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
 	spin_unlock_irq(&conf->device_lock);
 
 	async_tx_issue_pending_all();
-	unplug_slaves(mddev);
 
 	pr_debug("--- raid5d inactive\n");
 }
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev)
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
 		mddev->queue->queue_lock = &conf->device_lock;
-		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
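
A note on the wait_event_lock_irq() conversions above: the macro's fourth argument is a command executed after device_lock has been dropped and before the task schedules. The old code used that hook to unplug the member devices; with on-stack plugging the waiter must instead flush its own plug, otherwise the very I/O it is waiting on could sit unissued in current->plug. The md variant of the macro from this era behaves roughly like the following simplified sketch (illustrative, not the exact kernel text):

/* Simplified sketch of wait_event_lock_irq(); not the exact kernel macro. */
#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq(&(lock));				\
		cmd;		/* e.g. blk_flush_plug(current) */	\
		schedule();						\
		spin_lock_irq(&(lock));					\
	}								\
	finish_wait(&(wq), &__wait);					\
} while (0)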