aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorLukas Czerner <lczerner@redhat.com>2012-11-30 05:42:40 -0500
committerJens Axboe <axboe@kernel.dk>2012-11-30 05:47:57 -0500
commiteed8c02e680c04cd737e0a9cef74e68d8eb0cefa (patch)
tree8bd2bd10b0c02bb8a579ca3fd4f1482e5335c747 /drivers/md/raid5.c
parentd33b98fc82b0908e91fb05ae081acaed7323f9d2 (diff)
wait: add wait_event_lock_irq() interface
New wait_event{_interruptible}_lock_irq{_cmd} macros added. This commit moves the private wait_event_lock_irq() macro from MD to regular wait includes, introduces new macro wait_event_lock_irq_cmd() instead of using the old method with omitting cmd parameter which is ugly and makes a use of new macros in the MD. It also introduces the _interruptible_ variant. The use of new interface is when one have a special lock to protect data structures used in the condition, or one also needs to invoke "cmd" before putting it to sleep. All new macros are expected to be called with the lock taken. The lock is released before sleep and is reacquired afterwards. We will leave the macro with the lock held. Note to DM: IMO this should also fix theoretical race on waitqueue while using simultaneously wait_event_lock_irq() and wait_event() because of lack of locking around current state setting and wait queue removal. Signed-off-by: Lukas Czerner <lczerner@redhat.com> Cc: Neil Brown <neilb@suse.de> Cc: David Howells <dhowells@redhat.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c12
1 file changed, 5 insertions, 7 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c5439dce0295..2bf617d6f4fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -466,7 +466,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
466 do { 466 do {
467 wait_event_lock_irq(conf->wait_for_stripe, 467 wait_event_lock_irq(conf->wait_for_stripe,
468 conf->quiesce == 0 || noquiesce, 468 conf->quiesce == 0 || noquiesce,
469 conf->device_lock, /* nothing */); 469 conf->device_lock);
470 sh = __find_stripe(conf, sector, conf->generation - previous); 470 sh = __find_stripe(conf, sector, conf->generation - previous);
471 if (!sh) { 471 if (!sh) {
472 if (!conf->inactive_blocked) 472 if (!conf->inactive_blocked)
@@ -480,8 +480,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
480 (atomic_read(&conf->active_stripes) 480 (atomic_read(&conf->active_stripes)
481 < (conf->max_nr_stripes *3/4) 481 < (conf->max_nr_stripes *3/4)
482 || !conf->inactive_blocked), 482 || !conf->inactive_blocked),
483 conf->device_lock, 483 conf->device_lock);
484 );
485 conf->inactive_blocked = 0; 484 conf->inactive_blocked = 0;
486 } else 485 } else
487 init_stripe(sh, sector, previous); 486 init_stripe(sh, sector, previous);
@@ -1646,8 +1645,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1646 spin_lock_irq(&conf->device_lock); 1645 spin_lock_irq(&conf->device_lock);
1647 wait_event_lock_irq(conf->wait_for_stripe, 1646 wait_event_lock_irq(conf->wait_for_stripe,
1648 !list_empty(&conf->inactive_list), 1647 !list_empty(&conf->inactive_list),
1649 conf->device_lock, 1648 conf->device_lock);
1650 );
1651 osh = get_free_stripe(conf); 1649 osh = get_free_stripe(conf);
1652 spin_unlock_irq(&conf->device_lock); 1650 spin_unlock_irq(&conf->device_lock);
1653 atomic_set(&nsh->count, 1); 1651 atomic_set(&nsh->count, 1);
@@ -4000,7 +3998,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4000 spin_lock_irq(&conf->device_lock); 3998 spin_lock_irq(&conf->device_lock);
4001 wait_event_lock_irq(conf->wait_for_stripe, 3999 wait_event_lock_irq(conf->wait_for_stripe,
4002 conf->quiesce == 0, 4000 conf->quiesce == 0,
4003 conf->device_lock, /* nothing */); 4001 conf->device_lock);
4004 atomic_inc(&conf->active_aligned_reads); 4002 atomic_inc(&conf->active_aligned_reads);
4005 spin_unlock_irq(&conf->device_lock); 4003 spin_unlock_irq(&conf->device_lock);
4006 4004
@@ -6088,7 +6086,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
6088 wait_event_lock_irq(conf->wait_for_stripe, 6086 wait_event_lock_irq(conf->wait_for_stripe,
6089 atomic_read(&conf->active_stripes) == 0 && 6087 atomic_read(&conf->active_stripes) == 0 &&
6090 atomic_read(&conf->active_aligned_reads) == 0, 6088 atomic_read(&conf->active_aligned_reads) == 0,
6091 conf->device_lock, /* nothing */); 6089 conf->device_lock);
6092 conf->quiesce = 1; 6090 conf->quiesce = 1;
6093 spin_unlock_irq(&conf->device_lock); 6091 spin_unlock_irq(&conf->device_lock);
6094 /* allow reshape to continue */ 6092 /* allow reshape to continue */