Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 178 ++++++++++++++++++++++-------------------------------
 1 file changed, 69 insertions(+), 109 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67ffa391edcf..02db9183ca01 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -71,6 +71,7 @@ struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
 	union map_info info;
+	struct bio clone;
 };
 
 /*
@@ -86,12 +87,17 @@ struct dm_rq_target_io {
 };
 
 /*
- * For request-based dm.
- * One of these is allocated per bio.
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
  */
 struct dm_rq_clone_bio_info {
 	struct bio *orig;
 	struct dm_rq_target_io *tio;
+	struct bio clone;
 };
 
 union map_info *dm_get_mapinfo(struct bio *bio)
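
The front_pad mechanism this comment refers to: bio_alloc_bioset() hands back a struct bio preceded by front_pad bytes of caller-owned memory, and the inline bio_vec array used for small clones lives directly after the struct bio in the same allocation. That is why the embedded bio has to be the container's last member. A minimal sketch of the pattern, with hypothetical names (my_info, my_bs, my_alloc - none of these are in the patch):

	/* One allocation from a bioset created with
	 * front_pad = offsetof(struct my_info, clone) is laid out as:
	 *
	 *   [ my_info fields before .clone | struct bio | inline bio_vecs ]
	 *
	 * The inline bio_vecs follow the bio, so .clone must come last.
	 */
	struct my_info {
		void		*cookie;	/* per-clone bookkeeping */
		struct bio	clone;		/* must be the last member */
	};

	static struct bio_set *my_bs;	/* bioset_create(N, offsetof(struct my_info, clone)) */

	static struct my_info *my_alloc(unsigned nr_iovecs)
	{
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_iovecs, my_bs);

		return bio ? container_of(bio, struct my_info, clone) : NULL;
	}
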
@@ -209,8 +215,12 @@ struct dm_md_mempools {
 
 #define MIN_IOS 256
 static struct kmem_cache *_io_cache;
-static struct kmem_cache *_tio_cache;
 static struct kmem_cache *_rq_tio_cache;
+
+/*
+ * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
+ * still used for _io_cache, I'm leaving this for a later cleanup
+ */
 static struct kmem_cache *_rq_bio_info_cache;
 
 static int __init local_init(void)
@@ -222,14 +232,9 @@ static int __init local_init(void)
 	if (!_io_cache)
 		return r;
 
-	/* allocate a slab for the target ios */
-	_tio_cache = KMEM_CACHE(dm_target_io, 0);
-	if (!_tio_cache)
-		goto out_free_io_cache;
-
 	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
 	if (!_rq_tio_cache)
-		goto out_free_tio_cache;
+		goto out_free_io_cache;
 
 	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
 	if (!_rq_bio_info_cache)
@@ -255,8 +260,6 @@ out_free_rq_bio_info_cache:
 	kmem_cache_destroy(_rq_bio_info_cache);
 out_free_rq_tio_cache:
 	kmem_cache_destroy(_rq_tio_cache);
-out_free_tio_cache:
-	kmem_cache_destroy(_tio_cache);
 out_free_io_cache:
 	kmem_cache_destroy(_io_cache);
 
@@ -267,7 +270,6 @@ static void local_exit(void)
 {
 	kmem_cache_destroy(_rq_bio_info_cache);
 	kmem_cache_destroy(_rq_tio_cache);
-	kmem_cache_destroy(_tio_cache);
 	kmem_cache_destroy(_io_cache);
 	unregister_blkdev(_major, _name);
 	dm_uevent_exit();
@@ -453,7 +455,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
 
 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 {
-	mempool_free(tio, md->tio_pool);
+	bio_put(&tio->clone);
 }
 
 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
@@ -467,16 +469,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
 	mempool_free(tio, tio->md->tio_pool);
 }
 
-static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
-{
-	return mempool_alloc(md->io_pool, GFP_ATOMIC);
-}
-
-static void free_bio_info(struct dm_rq_clone_bio_info *info)
-{
-	mempool_free(info, info->tio->md->io_pool);
-}
-
 static int md_in_flight(struct mapped_device *md)
 {
 	return atomic_read(&md->pending[READ]) +
@@ -681,13 +673,7 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	/*
-	 * Store md for cleanup instead of tio which is about to get freed.
-	 */
-	bio->bi_private = md->bs;
-
 	free_tio(md, tio);
-	bio_put(bio);
 	dec_pending(io, error);
 }
 
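
The deleted lines above were working around the old lifetime split: the clone bio and the dm_target_io were separate allocations, so the endio path had to stash the bio_set in bi_private for the destructor and drop the bio by hand. With the tio embedded in the bio's front_pad they are one allocation, and free_tio() (see the earlier hunk) reduces to a single call:

	/* free_tio() after this patch: one bio_put() returns the clone and
	 * its enclosing dm_target_io to md->bs in a single step.
	 */
	static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
	{
		bio_put(&tio->clone);
	}
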
@@ -1007,12 +993,12 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
-static void __map_bio(struct dm_target *ti, struct bio *clone,
-		      struct dm_target_io *tio)
+static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
 	struct mapped_device *md;
+	struct bio *clone = &tio->clone;
 
 	clone->bi_end_io = clone_endio;
 	clone->bi_private = tio;
@@ -1036,12 +1022,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* error the io and bail out, or requeue it if needed */
 		md = tio->io->md;
 		dec_pending(tio->io, r);
-		/*
-		 * Store bio_set for cleanup.
-		 */
-		clone->bi_end_io = NULL;
-		clone->bi_private = md->bs;
-		bio_put(clone);
 		free_tio(md, tio);
 	} else if (r) {
 		DMWARN("unimplemented target map return value: %d", r);
@@ -1059,25 +1039,16 @@ struct clone_info {
 	unsigned short idx;
 };
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	struct bio_set *bs = bio->bi_private;
-
-	bio_free(bio, bs);
-}
-
 /*
  * Creates a little bio that just does part of a bvec.
  */
-static struct bio *split_bvec(struct bio *bio, sector_t sector,
-			      unsigned short idx, unsigned int offset,
-			      unsigned int len, struct bio_set *bs)
+static void split_bvec(struct dm_target_io *tio, struct bio *bio,
+		       sector_t sector, unsigned short idx, unsigned int offset,
+		       unsigned int len, struct bio_set *bs)
 {
-	struct bio *clone;
+	struct bio *clone = &tio->clone;
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
-	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
-	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
 	clone->bi_sector = sector;
@@ -1090,26 +1061,23 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
-
-	return clone;
 }
 
 /*
  * Creates a bio that consists of range of complete bvecs.
  */
-static struct bio *clone_bio(struct bio *bio, sector_t sector,
-			     unsigned short idx, unsigned short bv_count,
-			     unsigned int len, struct bio_set *bs)
+static void clone_bio(struct dm_target_io *tio, struct bio *bio,
+		      sector_t sector, unsigned short idx,
+		      unsigned short bv_count, unsigned int len,
+		      struct bio_set *bs)
 {
-	struct bio *clone;
+	struct bio *clone = &tio->clone;
 
-	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
 	clone->bi_vcnt = idx + bv_count;
@@ -1117,20 +1085,22 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
 					   bio_sector_offset(bio, idx, 0), len);
 	}
-
-	return clone;
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
-				      struct dm_target *ti)
+				      struct dm_target *ti, int nr_iovecs)
 {
-	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
+	struct dm_target_io *tio;
+	struct bio *clone;
+
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
+	tio = container_of(clone, struct dm_target_io, clone);
 
 	tio->io = ci->io;
 	tio->ti = ti;
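
alloc_tio() is now the only allocation on the bio-based submission path: one bio_alloc_bioset() call produces the clone and, via the front_pad, the dm_target_io around it. The new nr_iovecs argument sizes the clone's bvec array, which is why the callers below pass bio->bi_max_vecs for full clones but only 1 for the split_bvec() case. A hypothetical helper (not in the patch) makes the inverse relation explicit:

	/* Inverse of the offsetof() used when md->bs was created:
	 * bio_alloc_bioset() returns a pointer front_pad bytes into the
	 * allocation; container_of() steps back over that pad to the tio.
	 */
	static inline struct dm_target_io *tio_from_clone(struct bio *clone)
	{
		return container_of(clone, struct dm_target_io, clone);
	}
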
@@ -1142,8 +1112,8 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 				   unsigned request_nr, sector_t len)
 {
-	struct dm_target_io *tio = alloc_tio(ci, ti);
-	struct bio *clone;
+	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
+	struct bio *clone = &tio->clone;
 
 	tio->info.target_request_nr = request_nr;
 
@@ -1152,15 +1122,14 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
+
 	__bio_clone(clone, ci->bio);
-	clone->bi_destructor = dm_bio_destructor;
 	if (len) {
 		clone->bi_sector = ci->sector;
 		clone->bi_size = to_bytes(len);
 	}
 
-	__map_bio(ti, clone, tio);
+	__map_bio(ti, tio);
 }
 
 static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
@@ -1189,14 +1158,13 @@ static int __clone_and_map_empty_flush(struct clone_info *ci)
  */
 static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 {
-	struct bio *clone, *bio = ci->bio;
+	struct bio *bio = ci->bio;
 	struct dm_target_io *tio;
 
-	tio = alloc_tio(ci, ti);
-	clone = clone_bio(bio, ci->sector, ci->idx,
-			  bio->bi_vcnt - ci->idx, ci->sector_count,
-			  ci->md->bs);
-	__map_bio(ti, clone, tio);
+	tio = alloc_tio(ci, ti, bio->bi_max_vecs);
+	clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
+		  ci->sector_count, ci->md->bs);
+	__map_bio(ti, tio);
 	ci->sector_count = 0;
 }
 
@@ -1234,7 +1202,7 @@ static int __clone_and_map_discard(struct clone_info *ci)
 
 static int __clone_and_map(struct clone_info *ci)
 {
-	struct bio *clone, *bio = ci->bio;
+	struct bio *bio = ci->bio;
 	struct dm_target *ti;
 	sector_t len = 0, max;
 	struct dm_target_io *tio;
@@ -1274,10 +1242,10 @@ static int __clone_and_map(struct clone_info *ci)
 			len += bv_len;
 		}
 
-		tio = alloc_tio(ci, ti);
-		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
-				  ci->md->bs);
-		__map_bio(ti, clone, tio);
+		tio = alloc_tio(ci, ti, bio->bi_max_vecs);
+		clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
+			  ci->md->bs);
+		__map_bio(ti, tio);
 
 		ci->sector += len;
 		ci->sector_count -= len;
@@ -1302,12 +1270,11 @@ static int __clone_and_map(struct clone_info *ci)
 
 		len = min(remaining, max);
 
-		tio = alloc_tio(ci, ti);
-		clone = split_bvec(bio, ci->sector, ci->idx,
-				   bv->bv_offset + offset, len,
-				   ci->md->bs);
+		tio = alloc_tio(ci, ti, 1);
+		split_bvec(tio, bio, ci->sector, ci->idx,
+			   bv->bv_offset + offset, len, ci->md->bs);
 
-		__map_bio(ti, clone, tio);
+		__map_bio(ti, tio);
 
 		ci->sector += len;
 		ci->sector_count -= len;
@@ -1484,30 +1451,17 @@ void dm_dispatch_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(dm_dispatch_request);
 
-static void dm_rq_bio_destructor(struct bio *bio)
-{
-	struct dm_rq_clone_bio_info *info = bio->bi_private;
-	struct mapped_device *md = info->tio->md;
-
-	free_bio_info(info);
-	bio_free(bio, md->bs);
-}
-
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 				 void *data)
 {
 	struct dm_rq_target_io *tio = data;
-	struct mapped_device *md = tio->md;
-	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
-
-	if (!info)
-		return -ENOMEM;
+	struct dm_rq_clone_bio_info *info =
+		container_of(bio, struct dm_rq_clone_bio_info, clone);
 
 	info->orig = bio_orig;
 	info->tio = tio;
 	bio->bi_end_io = end_clone_bio;
 	bio->bi_private = info;
-	bio->bi_destructor = dm_rq_bio_destructor;
 
 	return 0;
 }
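
Under the old scheme the constructor had to mempool_alloc() a dm_rq_clone_bio_info with GFP_ATOMIC, which could fail and forced the -ENOMEM path plus a dedicated destructor. Now the info arrives preallocated in the clone's front_pad, so the constructor cannot fail. A sketch of the assumed call site (setup_clone() in dm.c, not part of this diff) shows where the bioset comes from:

	/* blk_rq_prep_clone() allocates every clone bio from the bio_set it
	 * is handed - here md->bs, whose front_pad carries the
	 * dm_rq_clone_bio_info - and runs the constructor on each clone.
	 */
	static int setup_clone_sketch(struct request *clone, struct request *rq,
				      struct dm_rq_target_io *tio, gfp_t gfp_mask)
	{
		return blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
					 dm_rq_bio_constructor, tio);
	}
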
@@ -1988,7 +1942,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
 	struct dm_md_mempools *p;
 
-	if (md->io_pool && md->tio_pool && md->bs)
+	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
 		/* the md already has necessary mempools */
 		goto out;
 
@@ -2765,13 +2719,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools->io_pool)
 		goto free_pools_and_out;
 
-	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
-			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
-			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
-	if (!pools->tio_pool)
-		goto free_io_pool_and_out;
+	pools->tio_pool = NULL;
+	if (type == DM_TYPE_REQUEST_BASED) {
+		pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+		if (!pools->tio_pool)
+			goto free_io_pool_and_out;
+	}
 
-	pools->bs = bioset_create(pool_size, 0);
+	pools->bs = (type == DM_TYPE_BIO_BASED) ?
+		bioset_create(pool_size,
+			      offsetof(struct dm_target_io, clone)) :
+		bioset_create(pool_size,
+			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
 
@@ -2784,7 +2743,8 @@ free_bioset_and_out:
 	bioset_free(pools->bs);
 
 free_tio_pool_and_out:
-	mempool_destroy(pools->tio_pool);
+	if (pools->tio_pool)
+		mempool_destroy(pools->tio_pool);
 
 free_io_pool_and_out:
 	mempool_destroy(pools->io_pool);
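
The two offsetof() values passed to bioset_create() above are what ties the patch together: the front_pad must cover every container member that precedes the embedded bio, and it differs by device type because bio-based and request-based dm use different containers. Restated as a hypothetical helper (dm_create_bs_sketch is not in the patch):

	/* front_pad selection per table type, as done inline above. */
	static struct bio_set *dm_create_bs_sketch(unsigned type, unsigned pool_size)
	{
		size_t front_pad = (type == DM_TYPE_BIO_BASED) ?
			offsetof(struct dm_target_io, clone) :
			offsetof(struct dm_rq_clone_bio_info, clone);

		return bioset_create(pool_size, front_pad);
	}

Note also the error-path change in this last hunk: bio-based devices no longer create a tio_pool, so free_tio_pool_and_out must now tolerate a NULL pointer.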