author	NeilBrown <neilb@suse.de>	2012-07-03 03:45:31 -0400
committer	NeilBrown <neilb@suse.de>	2012-07-03 03:45:31 -0400
commit	b357f04a67c2aeee828b240863cd3f21d6cb3179 (patch)
tree	b8495f2c04fc40d5a2885fe4f7ff8d627cd55031 /drivers/md/raid5.c
parent	f456309106e9657645c81bce1a6bb3230393564e (diff)
md: fix up plugging (again).
The value returned by "mddev_check_plugged" is only valid until the
next 'schedule', as that will unplug things. This could happen at any
call to mempool_alloc, so just calling mddev_check_plugged at the
start doesn't really make sense.

So call it just before, or just after, queuing things for the thread.
As the action that happens at unplug is to wake the thread, this makes
lots of sense. If we cannot add a plug (which requires a small
GFP_ATOMIC alloc) we wake the thread immediately.

RAID5 is a bit different. Requests are queued for the thread and the
thread is woken by release_stripe, so we don't need to wake the thread
on failure. However, the thread doesn't perform certain actions while
there is any active plug, so it is important to install a plug before
waking the thread. So for RAID5 we install the plug *before* queuing
the request and waking the thread.

Without this patch it is possible for raid1 or raid10 to queue a
request without then waking the thread, resulting in the array
locking up.

Also change raid10 to only flush_pending_write when there are no
active plugs, just like raid1.

This patch is suitable for 3.0 or later. I plan to submit it to
-stable, but I'd like to let it spend a few weeks in mainline first to
be sure it is completely safe.

Signed-off-by: NeilBrown <neilb@suse.de>
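To make the two orderings concrete, here is a minimal C sketch of the
patterns the message describes. Only the raid5.c hunks appear below, so
the raid1/raid10 half is illustrative: mbio, conf->pending_bio_list and
conf->pending_count are names assumed from the raid1 submission path,
and the real queuing code may differ in detail.

	/*
	 * raid1/raid10 pattern (sketch): queue the request for the md
	 * thread, then try to install a plug.  If the small GFP_ATOMIC
	 * allocation behind the plug fails, no unplug callback will ever
	 * run, so wake the thread directly.
	 */
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_add(&conf->pending_bio_list, mbio);	/* assumed raid1 names */
	conf->pending_count++;
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (!mddev_check_plugged(mddev))
		md_wakeup_thread(mddev->thread);

	/*
	 * raid5 pattern (matches the diff below): release_stripe() both
	 * queues the stripe and wakes the thread, so no wakeup is needed
	 * on plug failure; but the plug must be installed *before* the
	 * wakeup, or the woken thread may perform actions it should
	 * defer while a plug is active.
	 */
	mddev_check_plugged(mddev);
	release_stripe(sh);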
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7245a9df35a9..04348d76bb30 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3997,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
 	int remaining;
-	int plugged;
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
@@ -4016,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
-	plugged = mddev_check_plugged(mddev);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int previous;
@@ -4118,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			if ((bi->bi_rw & REQ_SYNC) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
+			mddev_check_plugged(mddev);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
@@ -4125,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			finish_wait(&conf->wait_for_overlap, &w);
 			break;
 		}
-
 	}
-	if (!plugged)
-		md_wakeup_thread(mddev->thread);
 
 	spin_lock_irq(&conf->device_lock);
 	remaining = raid5_dec_bi_phys_segments(bi);
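The net effect in make_request(), condensed from the hunks above (a
sketch; surrounding code elided):

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		/* ... find and prepare the stripe (elided) ... */
		mddev_check_plugged(mddev);	/* plug just before the wakeup */
		release_stripe(sh);		/* queues the stripe and wakes mddev->thread */
	}
	/*
	 * The stale 'plugged' snapshot and the trailing
	 * "if (!plugged) md_wakeup_thread(...)" are gone.
	 */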