author     NeilBrown <neilb@suse.de>  2010-06-01 05:37:29 -0400
committer  NeilBrown <neilb@suse.de>  2010-07-25 22:53:08 -0400
commit     2ac8740151b082f045e58010eb92560c3a23a0e9 (patch)
tree       39bff686e28f033339c6d1cf47042b6dc1586c2f
parent     11d8a6e3719519fbc0e2c9d61b6fa931b84bf813 (diff)
md/raid5: add simple plugging infrastructure.
md/raid5 uses the plugging infrastructure provided by the block layer
and 'struct request_queue'.  However, when raid5 is plugged under dm
there is no request queue, so we cannot use that.

So create a similar infrastructure that is much lighter weight, and use
it for raid5.

Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r--  drivers/md/md.c     45
-rw-r--r--  drivers/md/md.h     20
-rw-r--r--  drivers/md/raid5.c  39
-rw-r--r--  drivers/md/raid5.h   3
4 files changed, 93 insertions(+), 14 deletions(-)
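For orientation before the diff: a minimal sketch of how a hypothetical md
personality might drive the new plugger API. This is not part of the patch;
'my_conf', 'my_unplug' and the other 'my_*' names are invented for
illustration.

#include "md.h"	/* struct plug_handle and plugger_*() come from this patch */

struct my_conf {
	struct plug_handle	plug;	/* embedded so container_of() can recover my_conf */
	/* ... per-array state ... */
};

static void my_unplug(struct plug_handle *plug)
{
	struct my_conf *conf = container_of(plug, struct my_conf, plug);

	if (plugger_remove_plug(&conf->plug)) {
		/* we really were plugged: release the held-back I/O */
	}
}

static void my_run(struct my_conf *conf)
{
	/* once at setup: registers the callback, arms nothing yet */
	plugger_init(&conf->plug, my_unplug);
}

static void my_delay_io(struct my_conf *conf)
{
	/* queue a request internally, then plug; my_unplug() fires
	 * via kblockd roughly 3ms later */
	plugger_set_plug(&conf->plug);
}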
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f8775699e15a..eec75f130708 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -386,6 +386,51 @@ void md_barrier_request(mddev_t *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_barrier_request);
 
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue
+ */
+static void plugger_work(struct work_struct *work)
+{
+	struct plug_handle *plug =
+		container_of(work, struct plug_handle, unplug_work);
+	plug->unplug_fn(plug);
+}
+static void plugger_timeout(unsigned long data)
+{
+	struct plug_handle *plug = (void *)data;
+	kblockd_schedule_work(NULL, &plug->unplug_work);
+}
+void plugger_init(struct plug_handle *plug,
+		  void (*unplug_fn)(struct plug_handle *))
+{
+	plug->unplug_flag = 0;
+	plug->unplug_fn = unplug_fn;
+	init_timer(&plug->unplug_timer);
+	plug->unplug_timer.function = plugger_timeout;
+	plug->unplug_timer.data = (unsigned long)plug;
+	INIT_WORK(&plug->unplug_work, plugger_work);
+}
+EXPORT_SYMBOL_GPL(plugger_init);
+
+void plugger_set_plug(struct plug_handle *plug)
+{
+	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
+		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+}
+EXPORT_SYMBOL_GPL(plugger_set_plug);
+
+int plugger_remove_plug(struct plug_handle *plug)
+{
+	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
+		del_timer(&plug->unplug_timer);
+		return 1;
+	} else
+		return 0;
+}
+EXPORT_SYMBOL_GPL(plugger_remove_plug);
+
+
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
 	atomic_inc(&mddev->active);
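One detail worth noting in the hunk above: plugger_timeout() runs in timer
(softirq) context, where the unplug callback cannot safely run if it sleeps
or takes mutexes, so the timer merely kicks a work item over to kblockd.
Here is that pattern in isolation, as a hedged sketch with schedule_work()
standing in for kblockd's private queue; all 'demo_*' names are invented.

#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_plug {
	struct timer_list	timer;
	struct work_struct	work;
};

static void demo_work_fn(struct work_struct *work)
{
	/* process context: safe to sleep, take mutexes, submit I/O */
}

static void demo_timer_fn(unsigned long data)
{
	struct demo_plug *p = (void *)data;

	/* softirq context: just hand off to process context */
	schedule_work(&p->work);
}

static void demo_init(struct demo_plug *p)
{
	INIT_WORK(&p->work, demo_work_fn);
	init_timer(&p->timer);
	p->timer.function = demo_timer_fn;
	p->timer.data = (unsigned long)p;
}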
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c88b04745e85..5be0d6921b9d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,26 @@
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
+/* generic plugging support - like that provided with request_queue,
+ * but does not require a request_queue
+ */
+struct plug_handle {
+	void			(*unplug_fn)(struct plug_handle *);
+	struct timer_list	unplug_timer;
+	struct work_struct	unplug_work;
+	unsigned long		unplug_flag;
+};
+#define	PLUGGED_FLAG 1
+void plugger_init(struct plug_handle *plug,
+		  void (*unplug_fn)(struct plug_handle *));
+void plugger_set_plug(struct plug_handle *plug);
+int plugger_remove_plug(struct plug_handle *plug);
+static inline void plugger_flush(struct plug_handle *plug)
+{
+	del_timer_sync(&plug->unplug_timer);
+	cancel_work_sync(&plug->unplug_work);
+}
+
 /*
  * MD's 'extended' device
  */
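The inline plugger_flush() is the teardown half of the API: del_timer_sync()
and cancel_work_sync() guarantee that neither callback is still running when
it returns, so the structure embedding the plug_handle can then be freed.
A sketch of that ordering, continuing the hypothetical 'my_conf' from the
earlier sketch:

#include <linux/slab.h>	/* kfree() */

static void my_stop(struct my_conf *conf)
{
	plugger_flush(&conf->plug);	/* quiesce timer and work first */
	kfree(conf);			/* safe: my_unplug() can no longer fire */
}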
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ad6694f8a3a8..84bb9aec2211 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -201,11 +201,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 			if (test_bit(STRIPE_DELAYED, &sh->state)) {
 				list_add_tail(&sh->lru, &conf->delayed_list);
-				blk_plug_device(conf->mddev->queue);
+				plugger_set_plug(&conf->plug);
 			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 				   sh->bm_seq - conf->seq_write > 0) {
 				list_add_tail(&sh->lru, &conf->bitmap_list);
-				blk_plug_device(conf->mddev->queue);
+				plugger_set_plug(&conf->plug);
 			} else {
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
@@ -434,7 +434,7 @@ static int has_failed(raid5_conf_t *conf)
 }
 
 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(struct request_queue *q);
+static void raid5_unplug_device(raid5_conf_t *conf);
 
 static struct stripe_head *
 get_active_stripe(raid5_conf_t *conf, sector_t sector,
@@ -464,7 +464,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 				    < (conf->max_nr_stripes *3/4)
 				    || !conf->inactive_blocked),
 				   conf->device_lock,
-				   raid5_unplug_device(conf->mddev->queue)
+				   raid5_unplug_device(conf)
 				);
 			conf->inactive_blocked = 0;
 		} else
@@ -3618,7 +3618,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
 			list_add_tail(&sh->lru, &conf->hold_list);
 		}
 	} else
-		blk_plug_device(conf->mddev->queue);
+		plugger_set_plug(&conf->plug);
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3659,23 +3659,33 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid5_unplug_device(struct request_queue *q)
+static void raid5_unplug_device(raid5_conf_t *conf)
 {
-	mddev_t *mddev = q->queuedata;
-	raid5_conf_t *conf = mddev->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 
-	if (blk_remove_plug(q)) {
+	if (plugger_remove_plug(&conf->plug)) {
 		conf->seq_flush++;
 		raid5_activate_delayed(conf);
 	}
-	md_wakeup_thread(mddev->thread);
+	md_wakeup_thread(conf->mddev->thread);
 
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
-	unplug_slaves(mddev);
+	unplug_slaves(conf->mddev);
+}
+
+static void raid5_unplug(struct plug_handle *plug)
+{
+	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
+	raid5_unplug_device(conf);
+}
+
+static void raid5_unplug_queue(struct request_queue *q)
+{
+	mddev_t *mddev = q->queuedata;
+	raid5_unplug_device(mddev->private);
 }
 
 int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4085,7 +4095,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 					 * add failed due to overlap. Flush everything
 					 * and wait a while
 					 */
-					raid5_unplug_device(mddev->queue);
+					raid5_unplug_device(conf);
 					release_stripe(sh);
 					schedule();
 					goto retry;
@@ -5178,6 +5188,7 @@ static int run(mddev_t *mddev)
 	       mdname(mddev));
 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
+	plugger_init(&conf->plug, raid5_unplug);
 	if (mddev->queue) {
 		/* read-ahead size must cover two whole stripes, which
 		 * is 2 * (datadisks) * chunksize where 'n' is the
@@ -5197,7 +5208,7 @@ static int run(mddev_t *mddev)
 
 		mddev->queue->queue_lock = &conf->device_lock;
 
-		mddev->queue->unplug_fn = raid5_unplug_device;
+		mddev->queue->unplug_fn = raid5_unplug_queue;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
@@ -5229,7 +5240,7 @@ static int stop(mddev_t *mddev)
 	mddev->thread = NULL;
 	if (mddev->queue)
 		mddev->queue->backing_dev_info.congested_fn = NULL;
-	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+	plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
 	free_conf(conf);
 	mddev->private = NULL;
 	mddev->to_remove = &raid5_attrs_group;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index d6470dec667a..6acd458f239d 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -398,6 +398,9 @@ struct raid5_private_data {
 					    * (fresh device added).
 					    * Cleared when a sync completes.
 					    */
+
+	struct plug_handle	plug;
+
 	/* per cpu variables */
 	struct raid5_percpu {
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
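The net effect of the bit test in plugger_set_plug() is coalescing: only the
first call in a plug window arms the timer, so a burst of stripe releases
produces a single unplug callback roughly 3ms later (or sooner, if
raid5_unplug_device() explicitly removes the plug first). Illustrated with
the same hypothetical 'my_conf' as before:

static void my_burst(struct my_conf *conf)
{
	int i;

	for (i = 0; i < 100; i++)
		plugger_set_plug(&conf->plug);	/* only the first call arms the timer */

	/* ~3ms later: plugger_timeout() -> kblockd -> my_unplug(&conf->plug) */
}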