aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid1.c
diff options
context:
space:
mode:
authorLukas Czerner <lczerner@redhat.com>2012-11-30 05:42:40 -0500
committerJens Axboe <axboe@kernel.dk>2012-11-30 05:47:57 -0500
commiteed8c02e680c04cd737e0a9cef74e68d8eb0cefa (patch)
tree8bd2bd10b0c02bb8a579ca3fd4f1482e5335c747 /drivers/md/raid1.c
parentd33b98fc82b0908e91fb05ae081acaed7323f9d2 (diff)
wait: add wait_event_lock_irq() interface
New wait_event{_interruptible}_lock_irq{_cmd} macros added. This commit moves the private wait_event_lock_irq() macro from MD to regular wait includes, introduces new macro wait_event_lock_irq_cmd() instead of using the old method with omitting cmd parameter which is ugly and makes a use of new macros in the MD. It also introduces the _interruptible_ variant. The use of new interface is when one have a special lock to protect data structures used in the condition, or one also needs to invoke "cmd" before putting it to sleep. All new macros are expected to be called with the lock taken. The lock is released before sleep and is reacquired afterwards. We will leave the macro with the lock held. Note to DM: IMO this should also fix theoretical race on waitqueue while using simultaneously wait_event_lock_irq() and wait_event() because of lack of locking around current state setting and wait queue removal. Signed-off-by: Lukas Czerner <lczerner@redhat.com> Cc: Neil Brown <neilb@suse.de> Cc: David Howells <dhowells@redhat.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--drivers/md/raid1.c15
1 file changed, 7 insertions, 8 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8034fbd6190c..534dd74a2da0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
822 822
823 /* Wait until no block IO is waiting */ 823 /* Wait until no block IO is waiting */
824 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 824 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
825 conf->resync_lock, ); 825 conf->resync_lock);
826 826
827 /* block any new IO from starting */ 827 /* block any new IO from starting */
828 conf->barrier++; 828 conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
830 /* Now wait for all pending IO to complete */ 830 /* Now wait for all pending IO to complete */
831 wait_event_lock_irq(conf->wait_barrier, 831 wait_event_lock_irq(conf->wait_barrier,
832 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 832 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
833 conf->resync_lock, ); 833 conf->resync_lock);
834 834
835 spin_unlock_irq(&conf->resync_lock); 835 spin_unlock_irq(&conf->resync_lock);
836} 836}
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
864 (conf->nr_pending && 864 (conf->nr_pending &&
865 current->bio_list && 865 current->bio_list &&
866 !bio_list_empty(current->bio_list)), 866 !bio_list_empty(current->bio_list)),
867 conf->resync_lock, 867 conf->resync_lock);
868 );
869 conf->nr_waiting--; 868 conf->nr_waiting--;
870 } 869 }
871 conf->nr_pending++; 870 conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
898 spin_lock_irq(&conf->resync_lock); 897 spin_lock_irq(&conf->resync_lock);
899 conf->barrier++; 898 conf->barrier++;
900 conf->nr_waiting++; 899 conf->nr_waiting++;
901 wait_event_lock_irq(conf->wait_barrier, 900 wait_event_lock_irq_cmd(conf->wait_barrier,
902 conf->nr_pending == conf->nr_queued+1, 901 conf->nr_pending == conf->nr_queued+1,
903 conf->resync_lock, 902 conf->resync_lock,
904 flush_pending_writes(conf)); 903 flush_pending_writes(conf));
905 spin_unlock_irq(&conf->resync_lock); 904 spin_unlock_irq(&conf->resync_lock);
906} 905}
907static void unfreeze_array(struct r1conf *conf) 906static void unfreeze_array(struct r1conf *conf)