aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
author NeilBrown <neilb@suse.de> 2010-06-01 05:37:29 -0400
committer NeilBrown <neilb@suse.de> 2010-07-25 22:53:08 -0400
commit 2ac8740151b082f045e58010eb92560c3a23a0e9 (patch)
tree 39bff686e28f033339c6d1cf47042b6dc1586c2f /drivers/md/raid5.c
parent 11d8a6e3719519fbc0e2c9d61b6fa931b84bf813 (diff)
md/raid5: add simple plugging infrastructure.
md/raid5 uses the plugging infrastructure provided by the block layer and 'struct request_queue'. However, when we plug raid5 under dm there is no request queue, so we cannot use that. So create a similar infrastructure that is much lighter weight and use it for raid5. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- drivers/md/raid5.c 39
1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ad6694f8a3a8..84bb9aec2211 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -201,11 +201,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
201 if (test_bit(STRIPE_HANDLE, &sh->state)) { 201 if (test_bit(STRIPE_HANDLE, &sh->state)) {
202 if (test_bit(STRIPE_DELAYED, &sh->state)) { 202 if (test_bit(STRIPE_DELAYED, &sh->state)) {
203 list_add_tail(&sh->lru, &conf->delayed_list); 203 list_add_tail(&sh->lru, &conf->delayed_list);
204 blk_plug_device(conf->mddev->queue); 204 plugger_set_plug(&conf->plug);
205 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 205 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
206 sh->bm_seq - conf->seq_write > 0) { 206 sh->bm_seq - conf->seq_write > 0) {
207 list_add_tail(&sh->lru, &conf->bitmap_list); 207 list_add_tail(&sh->lru, &conf->bitmap_list);
208 blk_plug_device(conf->mddev->queue); 208 plugger_set_plug(&conf->plug);
209 } else { 209 } else {
210 clear_bit(STRIPE_BIT_DELAY, &sh->state); 210 clear_bit(STRIPE_BIT_DELAY, &sh->state);
211 list_add_tail(&sh->lru, &conf->handle_list); 211 list_add_tail(&sh->lru, &conf->handle_list);
@@ -434,7 +434,7 @@ static int has_failed(raid5_conf_t *conf)
434} 434}
435 435
436static void unplug_slaves(mddev_t *mddev); 436static void unplug_slaves(mddev_t *mddev);
437static void raid5_unplug_device(struct request_queue *q); 437static void raid5_unplug_device(raid5_conf_t *conf);
438 438
439static struct stripe_head * 439static struct stripe_head *
440get_active_stripe(raid5_conf_t *conf, sector_t sector, 440get_active_stripe(raid5_conf_t *conf, sector_t sector,
@@ -464,7 +464,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
464 < (conf->max_nr_stripes *3/4) 464 < (conf->max_nr_stripes *3/4)
465 || !conf->inactive_blocked), 465 || !conf->inactive_blocked),
466 conf->device_lock, 466 conf->device_lock,
467 raid5_unplug_device(conf->mddev->queue) 467 raid5_unplug_device(conf)
468 ); 468 );
469 conf->inactive_blocked = 0; 469 conf->inactive_blocked = 0;
470 } else 470 } else
@@ -3618,7 +3618,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
3618 list_add_tail(&sh->lru, &conf->hold_list); 3618 list_add_tail(&sh->lru, &conf->hold_list);
3619 } 3619 }
3620 } else 3620 } else
3621 blk_plug_device(conf->mddev->queue); 3621 plugger_set_plug(&conf->plug);
3622} 3622}
3623 3623
3624static void activate_bit_delay(raid5_conf_t *conf) 3624static void activate_bit_delay(raid5_conf_t *conf)
@@ -3659,23 +3659,33 @@ static void unplug_slaves(mddev_t *mddev)
3659 rcu_read_unlock(); 3659 rcu_read_unlock();
3660} 3660}
3661 3661
3662static void raid5_unplug_device(struct request_queue *q) 3662static void raid5_unplug_device(raid5_conf_t *conf)
3663{ 3663{
3664 mddev_t *mddev = q->queuedata;
3665 raid5_conf_t *conf = mddev->private;
3666 unsigned long flags; 3664 unsigned long flags;
3667 3665
3668 spin_lock_irqsave(&conf->device_lock, flags); 3666 spin_lock_irqsave(&conf->device_lock, flags);
3669 3667
3670 if (blk_remove_plug(q)) { 3668 if (plugger_remove_plug(&conf->plug)) {
3671 conf->seq_flush++; 3669 conf->seq_flush++;
3672 raid5_activate_delayed(conf); 3670 raid5_activate_delayed(conf);
3673 } 3671 }
3674 md_wakeup_thread(mddev->thread); 3672 md_wakeup_thread(conf->mddev->thread);
3675 3673
3676 spin_unlock_irqrestore(&conf->device_lock, flags); 3674 spin_unlock_irqrestore(&conf->device_lock, flags);
3677 3675
3678 unplug_slaves(mddev); 3676 unplug_slaves(conf->mddev);
3677}
3678
3679static void raid5_unplug(struct plug_handle *plug)
3680{
3681 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
3682 raid5_unplug_device(conf);
3683}
3684
3685static void raid5_unplug_queue(struct request_queue *q)
3686{
3687 mddev_t *mddev = q->queuedata;
3688 raid5_unplug_device(mddev->private);
3679} 3689}
3680 3690
3681int md_raid5_congested(mddev_t *mddev, int bits) 3691int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4085,7 +4095,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4085 * add failed due to overlap. Flush everything 4095 * add failed due to overlap. Flush everything
4086 * and wait a while 4096 * and wait a while
4087 */ 4097 */
4088 raid5_unplug_device(mddev->queue); 4098 raid5_unplug_device(conf);
4089 release_stripe(sh); 4099 release_stripe(sh);
4090 schedule(); 4100 schedule();
4091 goto retry; 4101 goto retry;
@@ -5178,6 +5188,7 @@ static int run(mddev_t *mddev)
5178 mdname(mddev)); 5188 mdname(mddev));
5179 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5189 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5180 5190
5191 plugger_init(&conf->plug, raid5_unplug);
5181 if (mddev->queue) { 5192 if (mddev->queue) {
5182 /* read-ahead size must cover two whole stripes, which 5193 /* read-ahead size must cover two whole stripes, which
5183 * is 2 * (datadisks) * chunksize where 'n' is the 5194 * is 2 * (datadisks) * chunksize where 'n' is the
@@ -5197,7 +5208,7 @@ static int run(mddev_t *mddev)
5197 5208
5198 mddev->queue->queue_lock = &conf->device_lock; 5209 mddev->queue->queue_lock = &conf->device_lock;
5199 5210
5200 mddev->queue->unplug_fn = raid5_unplug_device; 5211 mddev->queue->unplug_fn = raid5_unplug_queue;
5201 5212
5202 chunk_size = mddev->chunk_sectors << 9; 5213 chunk_size = mddev->chunk_sectors << 9;
5203 blk_queue_io_min(mddev->queue, chunk_size); 5214 blk_queue_io_min(mddev->queue, chunk_size);
@@ -5229,7 +5240,7 @@ static int stop(mddev_t *mddev)
5229 mddev->thread = NULL; 5240 mddev->thread = NULL;
5230 if (mddev->queue) 5241 if (mddev->queue)
5231 mddev->queue->backing_dev_info.congested_fn = NULL; 5242 mddev->queue->backing_dev_info.congested_fn = NULL;
5232 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5243 plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
5233 free_conf(conf); 5244 free_conf(conf);
5234 mddev->private = NULL; 5245 mddev->private = NULL;
5235 mddev->to_remove = &raid5_attrs_group; 5246 mddev->to_remove = &raid5_attrs_group;