author     Joe Thornber <ejt@redhat.com>        2013-03-01 17:45:50 -0500
committer  Alasdair G Kergon <agk@redhat.com>   2013-03-01 17:45:50 -0500
commit     6beca5eb6e801aea810da6cbc4990d96e6c1c0bc (patch)
tree       37586c9ecdd7bbc89ce4db07abde3ca32d6a8e1c /drivers/md/dm-thin.c
parent     4e7f1f9089660aec3b5ab2645ce62777c6f4c6a2 (diff)
dm bio prison: pass cell memory in
Change the dm_bio_prison interface so that instead of allocating memory
internally, dm_bio_detain is supplied with a pre-allocated cell each time
it is called.

This enables a subsequent patch to move the allocation of the struct
dm_bio_prison_cell outside the thin target's mapping function so it can
no longer block there.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
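The reworked calling convention can be summarised with a minimal sketch,
assuming only the functions visible in this patch; the wrapper name
example_detain is hypothetical and simply mirrors the bio_detain() helper
the patch adds to dm-thin.c:

/*
 * Illustrative sketch (not part of the patch): the caller now supplies
 * the cell memory instead of dm_bio_detain() allocating it internally.
 */
static int example_detain(struct dm_bio_prison *prison,
                          struct dm_cell_key *key, struct bio *bio,
                          struct dm_bio_prison_cell **cell_result)
{
        struct dm_bio_prison_cell *prealloc;
        int r;

        /* Mempool-backed: may sleep, but cannot fail. */
        prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

        r = dm_bio_detain(prison, key, bio, prealloc, cell_result);
        if (r)
                /* The bio joined an existing cell; ours is unused. */
                dm_bio_prison_free_cell(prison, prealloc);

        return r;
}

Because the allocation is now the caller's responsibility, a later patch
can hoist it out of the map path, where blocking is not allowed.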
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  103
1 file changed, 77 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 35d9d0396cc2..5304e3a29a14 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -229,6 +229,54 @@ struct thin_c {
 
 /*----------------------------------------------------------------*/
 
+static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
+                      struct dm_bio_prison_cell **cell_result)
+{
+        int r;
+        struct dm_bio_prison_cell *cell_prealloc;
+
+        /*
+         * Allocate a cell from the prison's mempool.
+         * This might block but it can't fail.
+         */
+        cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
+
+        r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
+        if (r)
+                /*
+                 * We reused an old cell; we can get rid of
+                 * the new one.
+                 */
+                dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+
+        return r;
+}
+
+static void cell_release(struct pool *pool,
+                         struct dm_bio_prison_cell *cell,
+                         struct bio_list *bios)
+{
+        dm_cell_release(pool->prison, cell, bios);
+        dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_release_no_holder(struct pool *pool,
+                                   struct dm_bio_prison_cell *cell,
+                                   struct bio_list *bios)
+{
+        dm_cell_release_no_holder(pool->prison, cell, bios);
+        dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_error(struct pool *pool,
+                       struct dm_bio_prison_cell *cell)
+{
+        dm_cell_error(pool->prison, cell);
+        dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * A global list of pools that uses a struct mapped_device as a key.
  */
@@ -524,14 +572,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
         unsigned long flags;
 
         spin_lock_irqsave(&pool->lock, flags);
-        dm_cell_release(cell, &pool->deferred_bios);
+        cell_release(pool, cell, &pool->deferred_bios);
         spin_unlock_irqrestore(&tc->pool->lock, flags);
 
         wake_worker(pool);
 }
 
 /*
- * Same as cell_defer except it omits the original holder of the cell.
+ * Same as cell_defer above, except it omits the original holder of the cell.
  */
 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
@@ -539,7 +587,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
         unsigned long flags;
 
         spin_lock_irqsave(&pool->lock, flags);
-        dm_cell_release_no_holder(cell, &pool->deferred_bios);
+        cell_release_no_holder(pool, cell, &pool->deferred_bios);
         spin_unlock_irqrestore(&pool->lock, flags);
 
         wake_worker(pool);
@@ -549,13 +597,14 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
         if (m->bio)
                 m->bio->bi_end_io = m->saved_bi_end_io;
-        dm_cell_error(m->cell);
+        cell_error(m->tc->pool, m->cell);
         list_del(&m->list);
         mempool_free(m, m->tc->pool->mapping_pool);
 }
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
         struct thin_c *tc = m->tc;
+        struct pool *pool = tc->pool;
         struct bio *bio;
         int r;
 
@@ -564,7 +613,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
         bio->bi_end_io = m->saved_bi_end_io;
 
         if (m->err) {
-                dm_cell_error(m->cell);
+                cell_error(pool, m->cell);
                 goto out;
         }
 
@@ -576,7 +625,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
         if (r) {
                 DMERR_LIMIT("dm_thin_insert_block() failed");
-                dm_cell_error(m->cell);
+                cell_error(pool, m->cell);
                 goto out;
         }
 
@@ -594,7 +643,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 
 out:
         list_del(&m->list);
-        mempool_free(m, tc->pool->mapping_pool);
+        mempool_free(m, pool->mapping_pool);
 }
 
 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -745,7 +794,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                 if (r < 0) {
                         mempool_free(m, pool->mapping_pool);
                         DMERR_LIMIT("dm_kcopyd_copy() failed");
-                        dm_cell_error(cell);
+                        cell_error(pool, cell);
                 }
         }
 }
@@ -811,7 +860,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                 if (r < 0) {
                         mempool_free(m, pool->mapping_pool);
                         DMERR_LIMIT("dm_kcopyd_zero() failed");
-                        dm_cell_error(cell);
+                        cell_error(pool, cell);
                 }
         }
 }
@@ -917,13 +966,13 @@ static void retry_on_resume(struct bio *bio)
         spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct dm_bio_prison_cell *cell)
+static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
         struct bio *bio;
         struct bio_list bios;
 
         bio_list_init(&bios);
-        dm_cell_release(cell, &bios);
+        cell_release(pool, cell, &bios);
 
         while ((bio = bio_list_pop(&bios)))
                 retry_on_resume(bio);
@@ -941,7 +990,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
         struct dm_thin_new_mapping *m;
 
         build_virtual_key(tc->td, block, &key);
-        if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+        if (bio_detain(tc->pool, &key, bio, &cell))
                 return;
 
         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -953,7 +1002,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                  * on this block.
                  */
                 build_data_key(tc->td, lookup_result.block, &key2);
-                if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+                if (bio_detain(tc->pool, &key2, bio, &cell2)) {
                         cell_defer_no_holder(tc, cell);
                         break;
                 }
@@ -1029,13 +1078,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                 break;
 
         case -ENOSPC:
-                no_space(cell);
+                no_space(tc->pool, cell);
                 break;
 
         default:
                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                             __func__, r);
-                dm_cell_error(cell);
+                cell_error(tc->pool, cell);
                 break;
         }
 }
@@ -1053,7 +1102,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
          * of being broken so we have nothing further to do here.
          */
         build_data_key(tc->td, lookup_result->block, &key);
-        if (dm_bio_detain(pool->prison, &key, bio, &cell))
+        if (bio_detain(pool, &key, bio, &cell))
                 return;
 
         if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1074,12 +1123,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 {
         int r;
         dm_block_t data_block;
+        struct pool *pool = tc->pool;
 
         /*
          * Remap empty bios (flushes) immediately, without provisioning.
          */
         if (!bio->bi_size) {
-                inc_all_io_entry(tc->pool, bio);
+                inc_all_io_entry(pool, bio);
                 cell_defer_no_holder(tc, cell);
 
                 remap_and_issue(tc, bio, 0);
@@ -1106,14 +1156,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
                 break;
 
         case -ENOSPC:
-                no_space(cell);
+                no_space(pool, cell);
                 break;
 
         default:
                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                             __func__, r);
-                set_pool_mode(tc->pool, PM_READ_ONLY);
-                dm_cell_error(cell);
+                set_pool_mode(pool, PM_READ_ONLY);
+                cell_error(pool, cell);
                 break;
         }
 }
@@ -1121,6 +1171,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 static void process_bio(struct thin_c *tc, struct bio *bio)
 {
         int r;
+        struct pool *pool = tc->pool;
         dm_block_t block = get_bio_block(tc, bio);
         struct dm_bio_prison_cell *cell;
         struct dm_cell_key key;
@@ -1131,7 +1182,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
          * being provisioned so we have nothing further to do here.
          */
         build_virtual_key(tc->td, block, &key);
-        if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+        if (bio_detain(pool, &key, bio, &cell))
                 return;
 
         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1139,9 +1190,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
         case 0:
                 if (lookup_result.shared) {
                         process_shared_bio(tc, bio, block, &lookup_result);
-                        cell_defer_no_holder(tc, cell);
+                        cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
                 } else {
-                        inc_all_io_entry(tc->pool, bio);
+                        inc_all_io_entry(pool, bio);
                         cell_defer_no_holder(tc, cell);
 
                         remap_and_issue(tc, bio, lookup_result.block);
@@ -1150,7 +1201,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
         case -ENODATA:
                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
-                        inc_all_io_entry(tc->pool, bio);
+                        inc_all_io_entry(pool, bio);
                         cell_defer_no_holder(tc, cell);
 
                         remap_to_origin_and_issue(tc, bio);
@@ -1429,11 +1480,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
         }
 
         build_virtual_key(tc->td, block, &key);
-        if (bio_detain(tc->pool, &key, bio, &cell1))
+        if (bio_detain(tc->pool, &key, bio, &cell1))
                 return DM_MAPIO_SUBMITTED;
 
         build_data_key(tc->td, result.block, &key);
-        if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+        if (bio_detain(tc->pool, &key, bio, &cell2)) {
                 cell_defer_no_holder(tc, cell1);
                 return DM_MAPIO_SUBMITTED;
         }