diff options
author | Joe Thornber <ejt@redhat.com> | 2014-03-03 10:46:42 -0500 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-03-05 15:26:58 -0500 |
commit | 18adc57779866ba451237dccca2ebf019be2fa20 (patch) | |
tree | a0f3a9d473fd433b7ffd5a7160ee55ad6d2ffd62 | |
parent | 3e1a0699095803e53072699a4a1485af7744601d (diff) |
dm thin: fix deadlock in __requeue_bio_list
The spin lock in requeue_io() was held for too long, allowing deadlock.
Don't worry, due to other issues addressed in the following "dm thin:
fix noflush suspend IO queueing" commit, this code was never called.
Fix this by taking the spin lock for a much shorter period of time.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r-- | drivers/md/dm-thin.c | 13 |
1 file changed, 7 insertions, 6 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 38a063f7afa4..f39876dd307d 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -369,14 +369,18 @@ struct dm_thin_endio_hook { | |||
369 | struct dm_thin_new_mapping *overwrite_mapping; | 369 | struct dm_thin_new_mapping *overwrite_mapping; |
370 | }; | 370 | }; |
371 | 371 | ||
372 | static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) | 372 | static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) |
373 | { | 373 | { |
374 | struct bio *bio; | 374 | struct bio *bio; |
375 | struct bio_list bios; | 375 | struct bio_list bios; |
376 | unsigned long flags; | ||
376 | 377 | ||
377 | bio_list_init(&bios); | 378 | bio_list_init(&bios); |
379 | |||
380 | spin_lock_irqsave(&tc->pool->lock, flags); | ||
378 | bio_list_merge(&bios, master); | 381 | bio_list_merge(&bios, master); |
379 | bio_list_init(master); | 382 | bio_list_init(master); |
383 | spin_unlock_irqrestore(&tc->pool->lock, flags); | ||
380 | 384 | ||
381 | while ((bio = bio_list_pop(&bios))) { | 385 | while ((bio = bio_list_pop(&bios))) { |
382 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 386 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
@@ -391,12 +395,9 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) | |||
391 | static void requeue_io(struct thin_c *tc) | 395 | static void requeue_io(struct thin_c *tc) |
392 | { | 396 | { |
393 | struct pool *pool = tc->pool; | 397 | struct pool *pool = tc->pool; |
394 | unsigned long flags; | ||
395 | 398 | ||
396 | spin_lock_irqsave(&pool->lock, flags); | 399 | requeue_bio_list(tc, &pool->deferred_bios); |
397 | __requeue_bio_list(tc, &pool->deferred_bios); | 400 | requeue_bio_list(tc, &pool->retry_on_resume_list); |
398 | __requeue_bio_list(tc, &pool->retry_on_resume_list); | ||
399 | spin_unlock_irqrestore(&pool->lock, flags); | ||
400 | } | 401 | } |
401 | 402 | ||
402 | static void error_retry_list(struct pool *pool) | 403 | static void error_retry_list(struct pool *pool) |