diff options
| author | Mikulas Patocka <mpatocka@redhat.com> | 2012-10-12 16:02:15 -0400 |
|---|---|---|
| committer | Alasdair G Kergon <agk@redhat.com> | 2012-10-12 16:02:15 -0400 |
| commit | dba141601d1327146c84b575bd581ea8730e901c (patch) | |
| tree | fb2f82b07d682e5e8cbe5bca94249ad7dc6bbbd7 | |
| parent | 4f81a4176297db57c7ef3b2893092dd837c1e2a8 (diff) | |
dm: store dm_target_io in bio front_pad
Use the recently-added bio front_pad field to allocate struct dm_target_io.
Prior to this patch, dm_target_io was allocated from a mempool. For each
dm_target_io, there is exactly one bio allocated from a bioset.
This patch merges these two allocations into one allocation: we create a
bioset with front_pad equal to the size of dm_target_io so that every
bio allocated from the bioset has sizeof(struct dm_target_io) bytes
before it. We allocate a bio and use the bytes before the bio as
dm_target_io.
_tio_cache is removed and the tio_pool mempool is now only used for
request-based devices.
This idea was introduced by Kent Overstreet.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: tj@kernel.org
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Bill Pemberton <wfp5p@viridian.itc.virginia.edu>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
| -rw-r--r-- | drivers/md/dm.c | 108 |
1 file changed, 49 insertions, 59 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 66ceaff6455c..02db9183ca01 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -71,6 +71,7 @@ struct dm_target_io { | |||
| 71 | struct dm_io *io; | 71 | struct dm_io *io; |
| 72 | struct dm_target *ti; | 72 | struct dm_target *ti; |
| 73 | union map_info info; | 73 | union map_info info; |
| 74 | struct bio clone; | ||
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | /* | 77 | /* |
| @@ -214,7 +215,6 @@ struct dm_md_mempools { | |||
| 214 | 215 | ||
| 215 | #define MIN_IOS 256 | 216 | #define MIN_IOS 256 |
| 216 | static struct kmem_cache *_io_cache; | 217 | static struct kmem_cache *_io_cache; |
| 217 | static struct kmem_cache *_tio_cache; | ||
| 218 | static struct kmem_cache *_rq_tio_cache; | 218 | static struct kmem_cache *_rq_tio_cache; |
| 219 | 219 | ||
| 220 | /* | 220 | /* |
| @@ -232,14 +232,9 @@ static int __init local_init(void) | |||
| 232 | if (!_io_cache) | 232 | if (!_io_cache) |
| 233 | return r; | 233 | return r; |
| 234 | 234 | ||
| 235 | /* allocate a slab for the target ios */ | ||
| 236 | _tio_cache = KMEM_CACHE(dm_target_io, 0); | ||
| 237 | if (!_tio_cache) | ||
| 238 | goto out_free_io_cache; | ||
| 239 | |||
| 240 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); | 235 | _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); |
| 241 | if (!_rq_tio_cache) | 236 | if (!_rq_tio_cache) |
| 242 | goto out_free_tio_cache; | 237 | goto out_free_io_cache; |
| 243 | 238 | ||
| 244 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); | 239 | _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0); |
| 245 | if (!_rq_bio_info_cache) | 240 | if (!_rq_bio_info_cache) |
| @@ -265,8 +260,6 @@ out_free_rq_bio_info_cache: | |||
| 265 | kmem_cache_destroy(_rq_bio_info_cache); | 260 | kmem_cache_destroy(_rq_bio_info_cache); |
| 266 | out_free_rq_tio_cache: | 261 | out_free_rq_tio_cache: |
| 267 | kmem_cache_destroy(_rq_tio_cache); | 262 | kmem_cache_destroy(_rq_tio_cache); |
| 268 | out_free_tio_cache: | ||
| 269 | kmem_cache_destroy(_tio_cache); | ||
| 270 | out_free_io_cache: | 263 | out_free_io_cache: |
| 271 | kmem_cache_destroy(_io_cache); | 264 | kmem_cache_destroy(_io_cache); |
| 272 | 265 | ||
| @@ -277,7 +270,6 @@ static void local_exit(void) | |||
| 277 | { | 270 | { |
| 278 | kmem_cache_destroy(_rq_bio_info_cache); | 271 | kmem_cache_destroy(_rq_bio_info_cache); |
| 279 | kmem_cache_destroy(_rq_tio_cache); | 272 | kmem_cache_destroy(_rq_tio_cache); |
| 280 | kmem_cache_destroy(_tio_cache); | ||
| 281 | kmem_cache_destroy(_io_cache); | 273 | kmem_cache_destroy(_io_cache); |
| 282 | unregister_blkdev(_major, _name); | 274 | unregister_blkdev(_major, _name); |
| 283 | dm_uevent_exit(); | 275 | dm_uevent_exit(); |
| @@ -463,7 +455,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io) | |||
| 463 | 455 | ||
| 464 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | 456 | static void free_tio(struct mapped_device *md, struct dm_target_io *tio) |
| 465 | { | 457 | { |
| 466 | mempool_free(tio, md->tio_pool); | 458 | bio_put(&tio->clone); |
| 467 | } | 459 | } |
| 468 | 460 | ||
| 469 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, | 461 | static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, |
| @@ -682,7 +674,6 @@ static void clone_endio(struct bio *bio, int error) | |||
| 682 | } | 674 | } |
| 683 | 675 | ||
| 684 | free_tio(md, tio); | 676 | free_tio(md, tio); |
| 685 | bio_put(bio); | ||
| 686 | dec_pending(io, error); | 677 | dec_pending(io, error); |
| 687 | } | 678 | } |
| 688 | 679 | ||
| @@ -1002,12 +993,12 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) | |||
| 1002 | } | 993 | } |
| 1003 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); | 994 | EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); |
| 1004 | 995 | ||
| 1005 | static void __map_bio(struct dm_target *ti, struct bio *clone, | 996 | static void __map_bio(struct dm_target *ti, struct dm_target_io *tio) |
| 1006 | struct dm_target_io *tio) | ||
| 1007 | { | 997 | { |
| 1008 | int r; | 998 | int r; |
| 1009 | sector_t sector; | 999 | sector_t sector; |
| 1010 | struct mapped_device *md; | 1000 | struct mapped_device *md; |
| 1001 | struct bio *clone = &tio->clone; | ||
| 1011 | 1002 | ||
| 1012 | clone->bi_end_io = clone_endio; | 1003 | clone->bi_end_io = clone_endio; |
| 1013 | clone->bi_private = tio; | 1004 | clone->bi_private = tio; |
| @@ -1031,7 +1022,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, | |||
| 1031 | /* error the io and bail out, or requeue it if needed */ | 1022 | /* error the io and bail out, or requeue it if needed */ |
| 1032 | md = tio->io->md; | 1023 | md = tio->io->md; |
| 1033 | dec_pending(tio->io, r); | 1024 | dec_pending(tio->io, r); |
| 1034 | bio_put(clone); | ||
| 1035 | free_tio(md, tio); | 1025 | free_tio(md, tio); |
| 1036 | } else if (r) { | 1026 | } else if (r) { |
| 1037 | DMWARN("unimplemented target map return value: %d", r); | 1027 | DMWARN("unimplemented target map return value: %d", r); |
| @@ -1052,14 +1042,13 @@ struct clone_info { | |||
| 1052 | /* | 1042 | /* |
| 1053 | * Creates a little bio that just does part of a bvec. | 1043 | * Creates a little bio that just does part of a bvec. |
| 1054 | */ | 1044 | */ |
| 1055 | static struct bio *split_bvec(struct bio *bio, sector_t sector, | 1045 | static void split_bvec(struct dm_target_io *tio, struct bio *bio, |
| 1056 | unsigned short idx, unsigned int offset, | 1046 | sector_t sector, unsigned short idx, unsigned int offset, |
| 1057 | unsigned int len, struct bio_set *bs) | 1047 | unsigned int len, struct bio_set *bs) |
| 1058 | { | 1048 | { |
| 1059 | struct bio *clone; | 1049 | struct bio *clone = &tio->clone; |
| 1060 | struct bio_vec *bv = bio->bi_io_vec + idx; | 1050 | struct bio_vec *bv = bio->bi_io_vec + idx; |
| 1061 | 1051 | ||
| 1062 | clone = bio_alloc_bioset(GFP_NOIO, 1, bs); | ||
| 1063 | *clone->bi_io_vec = *bv; | 1052 | *clone->bi_io_vec = *bv; |
| 1064 | 1053 | ||
| 1065 | clone->bi_sector = sector; | 1054 | clone->bi_sector = sector; |
| @@ -1076,20 +1065,18 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, | |||
| 1076 | bio_integrity_trim(clone, | 1065 | bio_integrity_trim(clone, |
| 1077 | bio_sector_offset(bio, idx, offset), len); | 1066 | bio_sector_offset(bio, idx, offset), len); |
| 1078 | } | 1067 | } |
| 1079 | |||
| 1080 | return clone; | ||
| 1081 | } | 1068 | } |
| 1082 | 1069 | ||
| 1083 | /* | 1070 | /* |
| 1084 | * Creates a bio that consists of range of complete bvecs. | 1071 | * Creates a bio that consists of range of complete bvecs. |
| 1085 | */ | 1072 | */ |
| 1086 | static struct bio *clone_bio(struct bio *bio, sector_t sector, | 1073 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, |
| 1087 | unsigned short idx, unsigned short bv_count, | 1074 | sector_t sector, unsigned short idx, |
| 1088 | unsigned int len, struct bio_set *bs) | 1075 | unsigned short bv_count, unsigned int len, |
| 1076 | struct bio_set *bs) | ||
| 1089 | { | 1077 | { |
| 1090 | struct bio *clone; | 1078 | struct bio *clone = &tio->clone; |
| 1091 | 1079 | ||
| 1092 | clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); | ||
| 1093 | __bio_clone(clone, bio); | 1080 | __bio_clone(clone, bio); |
| 1094 | clone->bi_sector = sector; | 1081 | clone->bi_sector = sector; |
| 1095 | clone->bi_idx = idx; | 1082 | clone->bi_idx = idx; |
| @@ -1104,14 +1091,16 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, | |||
| 1104 | bio_integrity_trim(clone, | 1091 | bio_integrity_trim(clone, |
| 1105 | bio_sector_offset(bio, idx, 0), len); | 1092 | bio_sector_offset(bio, idx, 0), len); |
| 1106 | } | 1093 | } |
| 1107 | |||
| 1108 | return clone; | ||
| 1109 | } | 1094 | } |
| 1110 | 1095 | ||
| 1111 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | 1096 | static struct dm_target_io *alloc_tio(struct clone_info *ci, |
| 1112 | struct dm_target *ti) | 1097 | struct dm_target *ti, int nr_iovecs) |
| 1113 | { | 1098 | { |
| 1114 | struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO); | 1099 | struct dm_target_io *tio; |
| 1100 | struct bio *clone; | ||
| 1101 | |||
| 1102 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs); | ||
| 1103 | tio = container_of(clone, struct dm_target_io, clone); | ||
| 1115 | 1104 | ||
| 1116 | tio->io = ci->io; | 1105 | tio->io = ci->io; |
| 1117 | tio->ti = ti; | 1106 | tio->ti = ti; |
| @@ -1123,8 +1112,8 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, | |||
| 1123 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | 1112 | static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, |
| 1124 | unsigned request_nr, sector_t len) | 1113 | unsigned request_nr, sector_t len) |
| 1125 | { | 1114 | { |
| 1126 | struct dm_target_io *tio = alloc_tio(ci, ti); | 1115 | struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); |
| 1127 | struct bio *clone; | 1116 | struct bio *clone = &tio->clone; |
| 1128 | 1117 | ||
| 1129 | tio->info.target_request_nr = request_nr; | 1118 | tio->info.target_request_nr = request_nr; |
| 1130 | 1119 | ||
| @@ -1133,14 +1122,14 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, | |||
| 1133 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | 1122 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush |
| 1134 | * and discard, so no need for concern about wasted bvec allocations. | 1123 | * and discard, so no need for concern about wasted bvec allocations. |
| 1135 | */ | 1124 | */ |
| 1136 | clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs); | ||
| 1137 | 1125 | ||
| 1126 | __bio_clone(clone, ci->bio); | ||
| 1138 | if (len) { | 1127 | if (len) { |
| 1139 | clone->bi_sector = ci->sector; | 1128 | clone->bi_sector = ci->sector; |
| 1140 | clone->bi_size = to_bytes(len); | 1129 | clone->bi_size = to_bytes(len); |
| 1141 | } | 1130 | } |
| 1142 | 1131 | ||
| 1143 | __map_bio(ti, clone, tio); | 1132 | __map_bio(ti, tio); |
| 1144 | } | 1133 | } |
| 1145 | 1134 | ||
| 1146 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, | 1135 | static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, |
| @@ -1169,14 +1158,13 @@ static int __clone_and_map_empty_flush(struct clone_info *ci) | |||
| 1169 | */ | 1158 | */ |
| 1170 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) | 1159 | static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) |
| 1171 | { | 1160 | { |
| 1172 | struct bio *clone, *bio = ci->bio; | 1161 | struct bio *bio = ci->bio; |
| 1173 | struct dm_target_io *tio; | 1162 | struct dm_target_io *tio; |
| 1174 | 1163 | ||
| 1175 | tio = alloc_tio(ci, ti); | 1164 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
| 1176 | clone = clone_bio(bio, ci->sector, ci->idx, | 1165 | clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx, |
| 1177 | bio->bi_vcnt - ci->idx, ci->sector_count, | 1166 | ci->sector_count, ci->md->bs); |
| 1178 | ci->md->bs); | 1167 | __map_bio(ti, tio); |
| 1179 | __map_bio(ti, clone, tio); | ||
| 1180 | ci->sector_count = 0; | 1168 | ci->sector_count = 0; |
| 1181 | } | 1169 | } |
| 1182 | 1170 | ||
| @@ -1214,7 +1202,7 @@ static int __clone_and_map_discard(struct clone_info *ci) | |||
| 1214 | 1202 | ||
| 1215 | static int __clone_and_map(struct clone_info *ci) | 1203 | static int __clone_and_map(struct clone_info *ci) |
| 1216 | { | 1204 | { |
| 1217 | struct bio *clone, *bio = ci->bio; | 1205 | struct bio *bio = ci->bio; |
| 1218 | struct dm_target *ti; | 1206 | struct dm_target *ti; |
| 1219 | sector_t len = 0, max; | 1207 | sector_t len = 0, max; |
| 1220 | struct dm_target_io *tio; | 1208 | struct dm_target_io *tio; |
| @@ -1254,10 +1242,10 @@ static int __clone_and_map(struct clone_info *ci) | |||
| 1254 | len += bv_len; | 1242 | len += bv_len; |
| 1255 | } | 1243 | } |
| 1256 | 1244 | ||
| 1257 | tio = alloc_tio(ci, ti); | 1245 | tio = alloc_tio(ci, ti, bio->bi_max_vecs); |
| 1258 | clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, | 1246 | clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len, |
| 1259 | ci->md->bs); | 1247 | ci->md->bs); |
| 1260 | __map_bio(ti, clone, tio); | 1248 | __map_bio(ti, tio); |
| 1261 | 1249 | ||
| 1262 | ci->sector += len; | 1250 | ci->sector += len; |
| 1263 | ci->sector_count -= len; | 1251 | ci->sector_count -= len; |
| @@ -1282,12 +1270,11 @@ static int __clone_and_map(struct clone_info *ci) | |||
| 1282 | 1270 | ||
| 1283 | len = min(remaining, max); | 1271 | len = min(remaining, max); |
| 1284 | 1272 | ||
| 1285 | tio = alloc_tio(ci, ti); | 1273 | tio = alloc_tio(ci, ti, 1); |
| 1286 | clone = split_bvec(bio, ci->sector, ci->idx, | 1274 | split_bvec(tio, bio, ci->sector, ci->idx, |
| 1287 | bv->bv_offset + offset, len, | 1275 | bv->bv_offset + offset, len, ci->md->bs); |
| 1288 | ci->md->bs); | ||
| 1289 | 1276 | ||
| 1290 | __map_bio(ti, clone, tio); | 1277 | __map_bio(ti, tio); |
| 1291 | 1278 | ||
| 1292 | ci->sector += len; | 1279 | ci->sector += len; |
| 1293 | ci->sector_count -= len; | 1280 | ci->sector_count -= len; |
| @@ -1955,7 +1942,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | |||
| 1955 | { | 1942 | { |
| 1956 | struct dm_md_mempools *p; | 1943 | struct dm_md_mempools *p; |
| 1957 | 1944 | ||
| 1958 | if (md->io_pool && md->tio_pool && md->bs) | 1945 | if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) |
| 1959 | /* the md already has necessary mempools */ | 1946 | /* the md already has necessary mempools */ |
| 1960 | goto out; | 1947 | goto out; |
| 1961 | 1948 | ||
| @@ -2732,14 +2719,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) | |||
| 2732 | if (!pools->io_pool) | 2719 | if (!pools->io_pool) |
| 2733 | goto free_pools_and_out; | 2720 | goto free_pools_and_out; |
| 2734 | 2721 | ||
| 2735 | pools->tio_pool = (type == DM_TYPE_BIO_BASED) ? | 2722 | pools->tio_pool = NULL; |
| 2736 | mempool_create_slab_pool(MIN_IOS, _tio_cache) : | 2723 | if (type == DM_TYPE_REQUEST_BASED) { |
| 2737 | mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); | 2724 | pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache); |
| 2738 | if (!pools->tio_pool) | 2725 | if (!pools->tio_pool) |
| 2739 | goto free_io_pool_and_out; | 2726 | goto free_io_pool_and_out; |
| 2727 | } | ||
| 2740 | 2728 | ||
| 2741 | pools->bs = (type == DM_TYPE_BIO_BASED) ? | 2729 | pools->bs = (type == DM_TYPE_BIO_BASED) ? |
| 2742 | bioset_create(pool_size, 0) : | 2730 | bioset_create(pool_size, |
| 2731 | offsetof(struct dm_target_io, clone)) : | ||
| 2743 | bioset_create(pool_size, | 2732 | bioset_create(pool_size, |
| 2744 | offsetof(struct dm_rq_clone_bio_info, clone)); | 2733 | offsetof(struct dm_rq_clone_bio_info, clone)); |
| 2745 | if (!pools->bs) | 2734 | if (!pools->bs) |
| @@ -2754,7 +2743,8 @@ free_bioset_and_out: | |||
| 2754 | bioset_free(pools->bs); | 2743 | bioset_free(pools->bs); |
| 2755 | 2744 | ||
| 2756 | free_tio_pool_and_out: | 2745 | free_tio_pool_and_out: |
| 2757 | mempool_destroy(pools->tio_pool); | 2746 | if (pools->tio_pool) |
| 2747 | mempool_destroy(pools->tio_pool); | ||
| 2758 | 2748 | ||
| 2759 | free_io_pool_and_out: | 2749 | free_io_pool_and_out: |
| 2760 | mempool_destroy(pools->io_pool); | 2750 | mempool_destroy(pools->io_pool); |
