author     Lars Ellenberg <lars.ellenberg@linbit.com>        2010-12-19 05:29:55 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>      2011-03-10 05:45:08 -0500
commit     725a97e43ee945cc813fffd9e628e50d703b973b
tree       ec67dbfccf0b3a43cb879056a1fb320b82b8dd2d /drivers/block
parent     06d33e968d2c58143a7aaafa8963cf6a58099467
drbd: fix potential access of on-stack wait_queue_head_t after return
I ran into something declaring itself a "spinlock deadlock":

   BUG: spinlock lockup on CPU#1, kjournald/27816, ffff88000ad6bca0
   Pid: 27816, comm: kjournald Tainted: G        W  2.6.34.6 #2
   Call Trace:
    <IRQ>  [<ffffffff811ba0aa>] do_raw_spin_lock+0x11e/0x14d
    [<ffffffff81340fde>] _raw_spin_lock_irqsave+0x6a/0x81
    [<ffffffff8103b694>] ? __wake_up+0x22/0x50
    [<ffffffff8103b694>] __wake_up+0x22/0x50
    [<ffffffffa07ff661>] bm_async_io_complete+0x258/0x299 [drbd]

but the call trace does not fit at all: all other CPUs are in cpu_idle.

I think it may be this race:

  drbd_bm_write_page
    wait_queue_head_t io_wait;
    atomic_t in_flight;
    bm_async_io
      submit_bio
                                bm_async_io_complete
                                  if (atomic_dec_and_test(in_flight))
    wait_event(io_wait,
      atomic_read(in_flight) == 0)
    return
                                    wake_up(io_wait)

The wake_up now accesses the wait_queue_head_t spinlock, which is no
longer valid, since the stack frame of drbd_bm_write_page has already
been clobbered.

Fix this by using struct completion, which does both the condition test
and the wake_up inside its own spinlock, so this race cannot happen.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
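[Editor's note] The shape of the fix is the standard on-stack completion idiom: complete() updates the "done" state and performs the wake-up under the completion's own spinlock, and wait_for_completion() checks that state under the same lock, so the completer can never touch a stack frame that the waiter has already unwound. Below is a minimal sketch of that idiom, not code from drbd: the names my_io_ctx, my_endio(), my_submit_one() and my_submit_and_wait() are hypothetical, and the two-argument bi_end_io signature simply matches the one shown in the diff.

    #include <linux/bio.h>
    #include <linux/completion.h>

    /* Hypothetical per-request context, mirroring the shape of bm_aio_ctx. */
    struct my_io_ctx {
    	atomic_t in_flight;	/* submitted bios plus one extra reference */
    	struct completion done;
    	int error;
    };

    /* Hypothetical bi_end_io callback; may run in interrupt context. */
    static void my_endio(struct bio *bio, int error)
    {
    	struct my_io_ctx *ctx = bio->bi_private;

    	if (error)
    		ctx->error = error;
    	bio_put(bio);

    	/*
    	 * complete() tests and signals under the completion's internal
    	 * spinlock, so it cannot race with the waiter returning.
    	 */
    	if (atomic_dec_and_test(&ctx->in_flight))
    		complete(&ctx->done);
    }

    /*
    	 * Placeholder for the actual bio setup and submission.  A real
    	 * implementation would take a reference on ctx->in_flight, point the
    	 * bio's bi_private at ctx and bi_end_io at my_endio(), then call
    	 * submit_bio().  Elided here to keep the sketch short.
    	 */
    static void my_submit_one(struct my_io_ctx *ctx, int page_nr)
    {
    }

    /* Hypothetical submit path following the bm_rw() pattern of the patch. */
    static int my_submit_and_wait(int nr_pages)
    {
    	struct my_io_ctx ctx = {
    		.in_flight = ATOMIC_INIT(1),	/* extra ref: no early complete() */
    		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
    	};
    	int i;

    	for (i = 0; i < nr_pages; i++)
    		my_submit_one(&ctx, i);

    	/* Drop the extra ref; sleep only if bios are still in flight. */
    	if (!atomic_dec_and_test(&ctx.in_flight))
    		wait_for_completion(&ctx.done);

    	return ctx.error;
    }

Unlike the open-coded wait_event()/wake_up() pair, the final test and the wake-up here cannot interleave with the waiter leaving the function, which is exactly the window the report above describes.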
Diffstat (limited to 'drivers/block')
 drivers/block/drbd/drbd_bitmap.c | 38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 314a3632303b..25428bc28476 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -897,7 +897,7 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
 struct bm_aio_ctx {
 	struct drbd_conf *mdev;
 	atomic_t in_flight;
-	wait_queue_head_t io_wait;
+	struct completion done;
 	unsigned flags;
 #define BM_AIO_COPY_PAGES	1
 	int error;
@@ -948,7 +948,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
 	bio_put(bio);
 
 	if (atomic_dec_and_test(&ctx->in_flight))
-		wake_up(&ctx->io_wait);
+		complete(&ctx->done);
 }
 
 static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
@@ -1009,8 +1009,12 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
  */
 static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
 {
-	struct bm_aio_ctx ctx =
-		{ .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0 };
+	struct bm_aio_ctx ctx = {
+		.mdev = mdev,
+		.in_flight = ATOMIC_INIT(1),
+		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
+	};
 	struct drbd_bitmap *b = mdev->bitmap;
 	int num_pages, i, count = 0;
 	unsigned long now;
@@ -1031,10 +1035,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
 	num_pages = b->bm_number_of_pages;
 
 	now = jiffies;
-	ctx.mdev = mdev;
-	atomic_set(&ctx.in_flight, 1); /* one extra ref */
-	init_waitqueue_head(&ctx.io_wait);
-	ctx.error = 0;
 
 	/* let the layers below us try to merge these bios... */
 	for (i = 0; i < num_pages; i++) {
@@ -1060,8 +1060,13 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
 		cond_resched();
 	}
 
-	atomic_dec(&ctx.in_flight); /* drop the extra ref */
-	wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0);
+	/*
+	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
+	 * will not complete() early, and decrement / test it here.  If there
+	 * are still some bios in flight, we need to wait for them here.
+	 */
+	if (!atomic_dec_and_test(&ctx.in_flight))
+		wait_for_completion(&ctx.done);
 	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
 			rw == WRITE ? "WRITE" : "READ",
 			count, jiffies - now);
@@ -1133,19 +1138,20 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
  */
 int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
 {
-	struct bm_aio_ctx ctx = { .flags = BM_AIO_COPY_PAGES, };
+	struct bm_aio_ctx ctx = {
+		.mdev = mdev,
+		.in_flight = ATOMIC_INIT(1),
+		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+		.flags = BM_AIO_COPY_PAGES,
+	};
 
 	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
 		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
 		return 0;
 	}
 
-	ctx.mdev = mdev;
-	atomic_set(&ctx.in_flight, 1);
-	init_waitqueue_head(&ctx.io_wait);
-
 	bm_page_io_async(&ctx, idx, WRITE_SYNC);
-	wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0);
+	wait_for_completion(&ctx.done);
 
 	if (ctx.error)
 		drbd_chk_io_error(mdev, 1, true);