author	Lukas Czerner <lczerner@redhat.com>	2012-11-30 05:42:40 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-11-30 05:47:57 -0500
commit	eed8c02e680c04cd737e0a9cef74e68d8eb0cefa (patch)
tree	8bd2bd10b0c02bb8a579ca3fd4f1482e5335c747 /drivers
parent	d33b98fc82b0908e91fb05ae081acaed7323f9d2 (diff)
wait: add wait_event_lock_irq() interface
New wait_event{_interruptible}_lock_irq{_cmd} macros added.

This commit moves the private wait_event_lock_irq() macro from MD to the
regular wait includes, introduces the new macro wait_event_lock_irq_cmd()
in place of the old (and ugly) method of passing an empty cmd parameter,
and makes use of the new macros in MD. It also introduces the
_interruptible_ variant.

The new interface is for cases where one has a special lock protecting
the data structures used in the condition, or where one also needs to
invoke "cmd" before putting the task to sleep. All of the new macros are
expected to be called with the lock taken. The lock is released before
sleep and reacquired afterwards; the macros return with the lock held.

Note to DM: IMO this should also fix a theoretical race on the waitqueue
when wait_event_lock_irq() and wait_event() are used simultaneously,
caused by the lack of locking around setting the current task state and
removing the task from the wait queue.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Cc: Neil Brown <neilb@suse.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
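For reference, a minimal sketch of the calling convention under the new
interface, modeled on the raid1.c conversions below (conf, resync_lock,
wait_barrier and flush_pending_writes() are borrowed from raid1 purely
for illustration):

	/* The lock must be held on entry; each macro drops it around
	 * schedule() and reacquires it, so it is still held when the
	 * wait returns. */
	spin_lock_irq(&conf->resync_lock);

	/* Plain variant: no extra work before sleeping. */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* _cmd variant: run "cmd" (here, flushing queued writes) before
	 * each sleep while the condition is still false. */
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued + 1,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);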
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/md.c      2
-rw-r--r--  drivers/md/md.h     26
-rw-r--r--  drivers/md/raid1.c  15
-rw-r--r--  drivers/md/raid10.c 15
-rw-r--r--  drivers/md/raid5.c  12
5 files changed, 20 insertions, 50 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9ab768acfb62..7e513a38cec7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 	spin_lock_irq(&mddev->write_lock);
 	wait_event_lock_irq(mddev->sb_wait,
 			    !mddev->flush_bio,
-			    mddev->write_lock, /*nothing*/);
+			    mddev->write_lock);
 	mddev->flush_bio = bio;
 	spin_unlock_irq(&mddev->write_lock);
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af443ab868db..1e2fc3d9c74c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -551,32 +551,6 @@ struct md_thread {
 
 #define THREAD_WAKEUP 0
 
-#define __wait_event_lock_irq(wq, condition, lock, cmd)		\
-do {									\
-	wait_queue_t __wait;						\
-	init_waitqueue_entry(&__wait, current);				\
-									\
-	add_wait_queue(&wq, &__wait);					\
-	for (;;) {							\
-		set_current_state(TASK_UNINTERRUPTIBLE);		\
-		if (condition)						\
-			break;						\
-		spin_unlock_irq(&lock);					\
-		cmd;							\
-		schedule();						\
-		spin_lock_irq(&lock);					\
-	}								\
-	current->state = TASK_RUNNING;					\
-	remove_wait_queue(&wq, &__wait);				\
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd)			\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_lock_irq(wq, condition, lock, cmd);		\
-} while (0)
-
 static inline void safe_put_page(struct page *p)
 {
 	if (p) put_page(p);
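The generic replacements live in include/linux/wait.h, which is outside
this drivers-only diffstat. Based on the MD macros removed above and the
naming in the commit message, the new entry points plausibly reduce to
something like the following sketch (the exact wait.h bodies are an
assumption, not shown by this diff):

/* Assumed shape of the relocated macros in include/linux/wait.h; this
 * diffstat is limited to drivers/, so the real bodies are not shown.
 * wait_event_lock_irq() loses the trailing cmd argument; the _cmd
 * variant carries it explicitly. */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)	\
do {								\
	if (condition)						\
		break;						\
	__wait_event_lock_irq(wq, condition, lock, cmd);	\
} while (0)

#define wait_event_lock_irq(wq, condition, lock)		\
do {								\
	if (condition)						\
		break;						\
	__wait_event_lock_irq(wq, condition, lock, );		\
} while (0)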
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8034fbd6190c..534dd74a2da0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
 				    (conf->nr_pending &&
 				     current->bio_list &&
 				     !bio_list_empty(current->bio_list)),
-				    conf->resync_lock,
-			);
+				    conf->resync_lock);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	conf->barrier++;
 	conf->nr_waiting++;
-	wait_event_lock_irq(conf->wait_barrier,
+	wait_event_lock_irq_cmd(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    flush_pending_writes(conf));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(struct r1conf *conf)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 906ccbd0f7dc..9a08f621b27d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
 				    (conf->nr_pending &&
 				     current->bio_list &&
 				     !bio_list_empty(current->bio_list)),
-				    conf->resync_lock,
-			);
+				    conf->resync_lock);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	conf->barrier++;
 	conf->nr_waiting++;
-	wait_event_lock_irq(conf->wait_barrier,
+	wait_event_lock_irq_cmd(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    flush_pending_writes(conf));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c5439dce0295..2bf617d6f4fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -466,7 +466,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 	do {
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0 || noquiesce,
-				    conf->device_lock, /* nothing */);
+				    conf->device_lock);
 		sh = __find_stripe(conf, sector, conf->generation - previous);
 		if (!sh) {
 			if (!conf->inactive_blocked)
@@ -480,8 +480,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 				    (atomic_read(&conf->active_stripes)
 				     < (conf->max_nr_stripes *3/4)
 				     || !conf->inactive_blocked),
-				    conf->device_lock,
-				    );
+				    conf->device_lock);
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1646,8 +1645,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
-				    conf->device_lock,
-				    );
+				    conf->device_lock);
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -4000,7 +3998,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	spin_lock_irq(&conf->device_lock);
 	wait_event_lock_irq(conf->wait_for_stripe,
 			    conf->quiesce == 0,
-			    conf->device_lock, /* nothing */);
+			    conf->device_lock);
 	atomic_inc(&conf->active_aligned_reads);
 	spin_unlock_irq(&conf->device_lock);
 
@@ -6088,7 +6086,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    atomic_read(&conf->active_stripes) == 0 &&
 				    atomic_read(&conf->active_aligned_reads) == 0,
-				    conf->device_lock, /* nothing */);
+				    conf->device_lock);
 		conf->quiesce = 1;
 		spin_unlock_irq(&conf->device_lock);
 		/* allow reshape to continue */