author	Bart Van Assche <bvanassche@acm.org>	2014-12-09 10:58:35 -0500
committer	Jens Axboe <axboe@fb.com>	2014-12-09 11:07:16 -0500
commit	c38d185d4af12e8be63ca4b6745d99449c450f12 (patch)
tree	7de590795fcaf4557a7c0e2dee3f73cedddd39a1 /block
parent	9e98e9d7cf6e9d2ec1cce45e8d5ccaf3f9b386f3 (diff)
blk-mq: Fix a race between bt_clear_tag() and bt_get()
What we need is the following two guarantees:

* Any thread that observes the effect of the test_and_set_bit() by
  __bt_get_word() also observes the preceding addition of 'current' to the
  appropriate wait list. This is guaranteed by the semantics of the
  spin_unlock() operation performed by prepare_to_wait(). Hence the
  conversion of test_and_set_bit_lock() into test_and_set_bit().

* The wait lists are examined by bt_clear() after the tag bit has been
  cleared. clear_bit_unlock() guarantees that any thread that observes that
  the bit has been cleared also observes the store operations preceding
  clear_bit_unlock(). However, clear_bit_unlock() does not prevent the wait
  lists from being examined before the tag bit is cleared. Hence the
  addition of a memory barrier between clear_bit() and the wait list
  examination.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Robert Elliott <elliott@hp.com>
Cc: Ming Lei <ming.lei@canonical.com>
Cc: Alexander Gordeev <agordeev@redhat.com>
Cc: <stable@vger.kernel.org> # v3.13+
Signed-off-by: Jens Axboe <axboe@fb.com>
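To make the two guarantees concrete, the following is a minimal standalone
C11 sketch of the same waiter/waker ordering pattern. It is an illustrative
analogue, not the blk-mq code: tag_word, waiters, try_get_tag() and
put_tag() are hypothetical names, tag_word stands in for one bitmap word,
waiters for the wait list, and the seq_cst fence plays the role of smp_mb().

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_ulong tag_word;	/* bit 0 stands in for one tag */
	static atomic_int waiters;	/* stands in for the wait list */

	/* Waiter side, analogous to bt_get()/__bt_get_word(). */
	static bool try_get_tag(void)
	{
		/* "Add current to the wait list" before trying the tag.
		 * Both operations here are seq_cst, so any thread that
		 * observes the read-modify-write below also observes this
		 * registration, mirroring the release semantics of the
		 * spin_unlock() inside prepare_to_wait(). */
		atomic_fetch_add(&waiters, 1);

		if (atomic_fetch_or(&tag_word, 1UL) & 1UL)
			return false;	/* tag busy: a real waiter would sleep */

		atomic_fetch_sub(&waiters, 1);	/* got the tag, deregister */
		return true;
	}

	/* Waker side, analogous to bt_clear_tag(). */
	static void put_tag(void)
	{
		atomic_fetch_and(&tag_word, ~1UL);	/* clear_bit() */

		/* Full fence, playing the role of smp_mb(): the wait list
		 * must not be examined before the cleared tag bit is
		 * globally visible. */
		atomic_thread_fence(memory_order_seq_cst);

		if (atomic_load(&waiters) > 0)
			puts("wake a waiter");	/* wait list examination */
	}

	int main(void)
	{
		if (try_get_tag())
			put_tag();
		return 0;
	}

With this pairing, a waiter that loses the race (sees the bit still set)
is guaranteed to be visible on the wait list by the time the waker examines
it after clearing the bit, so no wakeup can be lost; a failed try_get_tag()
would then sleep and retry, roughly as bt_get() does around prepare_to_wait().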
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq-tag.c	11
1 file changed, 5 insertions, 6 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 0f5e22a7971f..e47c4c75fd33 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -158,7 +158,7 @@ restart:
 			return -1;
 		}
 		last_tag = tag + 1;
-	} while (test_and_set_bit_lock(tag, &bm->word));
+	} while (test_and_set_bit(tag, &bm->word));
 
 	return tag;
 }
@@ -357,11 +357,10 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 	struct bt_wait_state *bs;
 	int wait_cnt;
 
-	/*
-	 * The unlock memory barrier need to order access to req in free
-	 * path and clearing tag bit
-	 */
-	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
+	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
+
+	/* Ensure that the wait list checks occur after clear_bit(). */
+	smp_mb();
 
 	bs = bt_wake_ptr(bt);
 	if (!bs)
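The two hunks work together: dropping the _lock/_unlock bitop variants
removes acquire/release semantics that were not sufficient here, because
clear_bit_unlock() only orders the stores preceding it against observers of
the cleared bit and says nothing about the wait-list loads that follow it.
The explicit smp_mb() closes exactly that window, at the cost of a full
barrier on every tag release.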