author	NeilBrown <neilb@suse.de>	2006-07-10 07:44:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-10 16:24:16 -0400
commit	7c785b7a18dc30572a49c6b75efd384269735d14 (patch)
tree	de115f4692b1be55ae3ad93a2719edc5b5688a37 /drivers/md
parent	ff4e8d9a9f46e3a7f89d14ade52fe5d53a82c022 (diff)
[PATCH] md: fix a plug/unplug race in raid5
When a device is unplugged, requests are moved from one or two (depending on whether a bitmap is in use) queues to the main request queue.

So whenever requests are put on either of those queues, we should make sure the raid5 array is 'plugged'. However we don't. We currently plug the raid5 queue just before putting requests on queues, so there is room for a race. If something unplugs the queue at just the wrong time, requests will be left on the queue and nothing will want to unplug them. Normally something else will plug and unplug the queue fairly soon, but there is a risk that nothing will.

Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
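For context, the ordering the patch enforces can be modelled in a few lines of user-space C: the plug and the list insertion must happen inside the same critical section, otherwise an unplug can slip in between them and strand the request. This is only a sketch, not the kernel code; toy_queue, unplug(), enqueue_delayed_racy() and enqueue_delayed_fixed() are made-up names, and a pthread mutex stands in for conf->device_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	pthread_mutex_t lock;
	bool plugged;
	int delayed;	/* requests parked on a delayed/bitmap list */
	int runnable;	/* requests visible to the main request queue */
};

/* Unplug: move everything parked on the delayed list to the main queue. */
void unplug(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->plugged = false;
	q->runnable += q->delayed;
	q->delayed = 0;
	pthread_mutex_unlock(&q->lock);
}

/*
 * Old pattern: plug in one critical section, park the request in another.
 * If another CPU unplugs the queue in the window between the two steps,
 * the request sits on the delayed list with nothing left to move it.
 */
void enqueue_delayed_racy(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->plugged = true;
	pthread_mutex_unlock(&q->lock);
	/* <-- an unplug landing here strands the request parked below */
	pthread_mutex_lock(&q->lock);
	q->delayed++;
	pthread_mutex_unlock(&q->lock);
}

/*
 * Fixed pattern: park the request and plug the queue under the same lock,
 * mirroring __release_stripe() now calling blk_plug_device() while
 * conf->device_lock is already held.
 */
void enqueue_delayed_fixed(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->delayed++;
	q->plugged = true;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = { PTHREAD_MUTEX_INITIALIZER, false, 0, 0 };

	enqueue_delayed_fixed(&q);	/* enqueue and plug atomically */
	unplug(&q);			/* a later unplug always finds the request */
	printf("runnable=%d delayed=%d\n", q.runnable, q.delayed);
	return 0;
}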
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid5.c	18
1 file changed, 6 insertions, 12 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dd0d00108a31..6ba394082129 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -88,12 +88,14 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 		BUG_ON(!list_empty(&sh->lru));
 		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
-			if (test_bit(STRIPE_DELAYED, &sh->state))
+			if (test_bit(STRIPE_DELAYED, &sh->state)) {
 				list_add_tail(&sh->lru, &conf->delayed_list);
-			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-				 conf->seq_write == sh->bm_seq)
+				blk_plug_device(conf->mddev->queue);
+			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+				   conf->seq_write == sh->bm_seq) {
 				list_add_tail(&sh->lru, &conf->bitmap_list);
-			else {
+				blk_plug_device(conf->mddev->queue);
+			} else {
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
 			}
@@ -2555,13 +2557,6 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
 	return ret;
 }
 
-static inline void raid5_plug_device(raid5_conf_t *conf)
-{
-	spin_lock_irq(&conf->device_lock);
-	blk_plug_device(conf->mddev->queue);
-	spin_unlock_irq(&conf->device_lock);
-}
-
 static int make_request(request_queue_t *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
@@ -2671,7 +2666,6 @@ static int make_request(request_queue_t *q, struct bio * bi)
 				goto retry;
 			}
 			finish_wait(&conf->wait_for_overlap, &w);
-			raid5_plug_device(conf);
 			handle_stripe(sh, NULL);
 			release_stripe(sh);
 		} else {