author    Joe Thornber <ejt@redhat.com>    2013-03-01 17:45:50 -0500
committer Alasdair G Kergon <agk@redhat.com>    2013-03-01 17:45:50 -0500
commit    025b96853fe0bdc977d88b4242ca5e1f19d9bb66 (patch)
tree      5bf111eac74bb2798495fd4975b0d8df72c8d351 /drivers/md/dm-thin.c
parent    6beca5eb6e801aea810da6cbc4990d96e6c1c0bc (diff)
dm thin: remove cells from stack
This patch takes advantage of the new bio-prison interface where the memory is now passed in rather than using a mempool in bio-prison.  This allows the map function to avoid performing potentially-blocking allocations that could lead to deadlocks: we want to avoid the cell allocation that is done in bio_detain.

(The potential for mempool deadlocks still remains in other functions that use bio_detain.)

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
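For reference, a minimal sketch (not part of this patch) of the calling pattern the new interface enables: the caller supplies the dm_bio_prison_cell storage, here on the stack, so the map path never touches a mempool. The helper name example_map_detain and its simplified error handling are hypothetical; only dm_bio_detain() and the pass-the-memory-in convention come from the diff below.

/*
 * Illustrative sketch only (not from the patch): detain a bio using a
 * caller-provided cell, as thin_bio_map() does after this change.
 * 'example_map_detain' is a hypothetical helper; dm_bio_detain() and
 * DM_MAPIO_* are used as in the diff below.
 */
static int example_map_detain(struct pool *pool, struct dm_cell_key *key,
                              struct bio *bio)
{
        struct dm_bio_prison_cell cell;         /* memory passed in; no mempool allocation */
        struct dm_bio_prison_cell *cell_result;

        /*
         * dm_bio_detain() either installs 'cell' as a new prison cell or
         * queues the bio in an existing one; a non-zero return means the
         * bio was detained behind another holder and has been deferred.
         */
        if (dm_bio_detain(pool->prison, key, bio, &cell, &cell_result))
                return DM_MAPIO_SUBMITTED;

        /* ... holder path: remap the bio and release the cell later ... */
        return DM_MAPIO_REMAPPED;
}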
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--    drivers/md/dm-thin.c    47
1 file changed, 32 insertions, 15 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5304e3a29a14..009339d62828 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -229,6 +229,17 @@ struct thin_c {
 
 /*----------------------------------------------------------------*/
 
+/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+	queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
 		      struct dm_bio_prison_cell **cell_result)
 {
@@ -268,6 +279,19 @@ static void cell_release_no_holder(struct pool *pool,
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
+static void cell_defer_no_holder_no_free(struct thin_c *tc,
+					 struct dm_bio_prison_cell *cell)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	wake_worker(pool);
+}
+
 static void cell_error(struct pool *pool,
 		       struct dm_bio_prison_cell *cell)
 {
@@ -477,15 +501,6 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
 	issue(tc, bio);
 }
 
-/*
- * wake_worker() is used when new work is queued and when pool_resume is
- * ready to continue deferred IO processing.
- */
-static void wake_worker(struct pool *pool)
-{
-	queue_work(pool->wq, &pool->worker);
-}
-
 /*----------------------------------------------------------------*/
 
 /*
@@ -601,6 +616,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
 }
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
@@ -1438,7 +1454,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	dm_block_t block = get_bio_block(tc, bio);
 	struct dm_thin_device *td = tc->td;
 	struct dm_thin_lookup_result result;
-	struct dm_bio_prison_cell *cell1, *cell2;
+	struct dm_bio_prison_cell cell1, cell2;
+	struct dm_bio_prison_cell *cell_result;
 	struct dm_cell_key key;
 
 	thin_hook_bio(tc, bio);
@@ -1480,18 +1497,18 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	}
 
 	build_virtual_key(tc->td, block, &key);
-	if (bio_detain(tc->pool, &key, bio, &cell1))
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
 		return DM_MAPIO_SUBMITTED;
 
 	build_data_key(tc->td, result.block, &key);
-	if (bio_detain(tc->pool, &key, bio, &cell2)) {
-		cell_defer_no_holder(tc, cell1);
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+		cell_defer_no_holder_no_free(tc, &cell1);
 		return DM_MAPIO_SUBMITTED;
 	}
 
 	inc_all_io_entry(tc->pool, bio);
-	cell_defer_no_holder(tc, cell2);
-	cell_defer_no_holder(tc, cell1);
+	cell_defer_no_holder_no_free(tc, &cell2);
+	cell_defer_no_holder_no_free(tc, &cell1);
 
 	remap(tc, bio, result.block);
 	return DM_MAPIO_REMAPPED;