author     Kent Overstreet <koverstreet@google.com>    2012-09-06 18:34:55 -0400
committer  Jens Axboe <axboe@kernel.dk>                2012-09-09 04:35:38 -0400
commit     395c72a707d966b36d5a42fe12c3a237ded3a0d9
tree       79e4450a4f31409815d80ee8e1a7e1490a140f22
parent     eeea3ac912207dcf759b95b2b4c36f96bce583bf
block: Generalized bio pool freeing
With the old code, when you allocate a bio from a bio pool you have to
implement your own destructor that knows how to find the bio pool the
bio was originally allocated from.
This adds a new field to struct bio (bi_pool) and changes
bio_alloc_bioset() to use it. This makes various bio destructors
unnecessary, so they're then deleted.
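For illustration only (not part of the original commit message), a minimal before/after
sketch of what this means for a driver that allocates from its own bio_set; the names
my_bio_set, my_destructor and my_alloc_* are hypothetical, and the pattern simply mirrors
the drbd change below:

	#include <linux/bio.h>

	/* Hypothetical driver-owned pool. */
	static struct bio_set *my_bio_set;

	/* Old pattern: each pool user supplied a destructor that had to know
	 * which bio_set the bio was allocated from. */
	static void my_destructor(struct bio *bio)
	{
		bio_free(bio, my_bio_set);
	}

	static struct bio *my_alloc_old(gfp_t gfp_mask)
	{
		struct bio *bio = bio_alloc_bioset(gfp_mask, 1, my_bio_set);

		if (bio)
			bio->bi_destructor = my_destructor;
		return bio;
	}

	/* New pattern: bio_alloc_bioset() records the bio_set in bio->bi_pool,
	 * and bio_put() frees the bio back to that pool itself. */
	static struct bio *my_alloc_new(gfp_t gfp_mask)
	{
		return bio_alloc_bioset(gfp_mask, 1, my_bio_set);
	}

bio_put() now checks bi_pool first and falls back to bi_destructor, as the fs/bio.c
hunk below shows.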
v6: Explain the temporary if statement in bio_put
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: NeilBrown <neilb@suse.de>
CC: Alasdair Kergon <agk@redhat.com>
CC: Nicholas Bellinger <nab@linux-iscsi.org>
CC: Lars Ellenberg <lars.ellenberg@linbit.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 drivers/block/drbd/drbd_main.c      | 13
 drivers/md/dm-crypt.c               |  9
 drivers/md/dm-io.c                  | 11
 drivers/md/dm.c                     | 20
 drivers/md/md.c                     | 28
 drivers/target/target_core_iblock.c |  9
 fs/bio.c                            | 31
 include/linux/blk_types.h           |  3
 8 files changed, 21 insertions(+), 103 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f93a0320e952..f55683ad4ffa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -162,23 +162,12 @@ static const struct block_device_operations drbd_ops = {
 	.release = drbd_release,
 };
 
-static void bio_destructor_drbd(struct bio *bio)
-{
-	bio_free(bio, drbd_md_io_bio_set);
-}
-
 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 {
-	struct bio *bio;
-
 	if (!drbd_md_io_bio_set)
 		return bio_alloc(gfp_mask, 1);
 
-	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
-	if (!bio)
-		return NULL;
-	bio->bi_destructor = bio_destructor_drbd;
-	return bio;
+	return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
 }
 
 #ifdef __CHECKER__
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 664743d6a6cd..3c0acba042b6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -798,14 +798,6 @@ static int crypt_convert(struct crypt_config *cc,
 	return 0;
 }
 
-static void dm_crypt_bio_destructor(struct bio *bio)
-{
-	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->cc;
-
-	bio_free(bio, cc->bs);
-}
-
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
@@ -974,7 +966,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
 	clone->bi_rw = io->base_bio->bi_rw;
-	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea5dd289fe2a..1c46f97d6664 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -249,16 +249,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
 	dp->context_ptr = data;
 }
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	unsigned region;
-	struct io *io;
-
-	retrieve_io_and_region_from_bio(bio, &io, &region);
-
-	bio_free(bio, io->client->bios);
-}
-
 /*
  * Functions for getting the pages from kernel memory.
  */
@@ -317,7 +307,6 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	bio->bi_sector = where->sector + (where->count - remaining);
 	bio->bi_bdev = where->bdev;
 	bio->bi_end_io = endio;
-	bio->bi_destructor = dm_bio_destructor;
 	store_io_and_region_in_bio(bio, io, region);
 
 	if (rw & REQ_DISCARD) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..0c3d6dd51897 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -681,11 +681,6 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	/*
-	 * Store md for cleanup instead of tio which is about to get freed.
-	 */
-	bio->bi_private = md->bs;
-
 	free_tio(md, tio);
 	bio_put(bio);
 	dec_pending(io, error);
@@ -1032,11 +1027,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* error the io and bail out, or requeue it if needed */
 		md = tio->io->md;
 		dec_pending(tio->io, r);
-		/*
-		 * Store bio_set for cleanup.
-		 */
-		clone->bi_end_io = NULL;
-		clone->bi_private = md->bs;
 		bio_put(clone);
 		free_tio(md, tio);
 	} else if (r) {
@@ -1055,13 +1045,6 @@ struct clone_info {
 	unsigned short idx;
 };
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	struct bio_set *bs = bio->bi_private;
-
-	bio_free(bio, bs);
-}
-
 /*
  * Creates a little bio that just does part of a bvec.
  */
@@ -1073,7 +1056,6 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
-	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
 	clone->bi_sector = sector;
@@ -1105,7 +1087,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
 	clone->bi_vcnt = idx + bv_count;
@@ -1150,7 +1131,6 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 */
 	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
 	__bio_clone(clone, ci->bio);
-	clone->bi_destructor = dm_bio_destructor;
 	if (len) {
 		clone->bi_sector = ci->sector;
 		clone->bi_size = to_bytes(len);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3f6203a4c7ea..b8eebe357b2b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -155,32 +155,17 @@ static int start_readonly;
  * like bio_clone, but with a local bio set
  */
 
-static void mddev_bio_destructor(struct bio *bio)
-{
-	struct mddev *mddev, **mddevp;
-
-	mddevp = (void*)bio;
-	mddev = mddevp[-1];
-
-	bio_free(bio, mddev->bio_set);
-}
-
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 			    struct mddev *mddev)
 {
 	struct bio *b;
-	struct mddev **mddevp;
 
 	if (!mddev || !mddev->bio_set)
 		return bio_alloc(gfp_mask, nr_iovecs);
 
-	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
-			     mddev->bio_set);
+	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
 	if (!b)
 		return NULL;
-	mddevp = (void*)b;
-	mddevp[-1] = mddev;
-	b->bi_destructor = mddev_bio_destructor;
 	return b;
 }
 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
@@ -189,18 +174,14 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 			    struct mddev *mddev)
 {
 	struct bio *b;
-	struct mddev **mddevp;
 
 	if (!mddev || !mddev->bio_set)
 		return bio_clone(bio, gfp_mask);
 
-	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
-			     mddev->bio_set);
+	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, mddev->bio_set);
 	if (!b)
 		return NULL;
-	mddevp = (void*)b;
-	mddevp[-1] = mddev;
-	b->bi_destructor = mddev_bio_destructor;
+
 	__bio_clone(b, bio);
 	if (bio_integrity(bio)) {
 		int ret;
@@ -5006,8 +4987,7 @@ int md_run(struct mddev *mddev)
 	}
 
 	if (mddev->bio_set == NULL)
-		mddev->bio_set = bioset_create(BIO_POOL_SIZE,
-					       sizeof(struct mddev *));
+		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
 
 	spin_lock(&pers_lock);
 	pers = find_pers(mddev->level, mddev->clevel);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 76db75e836ed..e58cd7d2fce4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -543,14 +543,6 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
 	kfree(ibr);
 }
 
-static void iblock_bio_destructor(struct bio *bio)
-{
-	struct se_cmd *cmd = bio->bi_private;
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-
-	bio_free(bio, ib_dev->ibd_bio_set);
-}
-
 static struct bio *
 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
@@ -572,7 +564,6 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = cmd;
-	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
 	return bio;
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -272,10 +272,6 @@ EXPORT_SYMBOL(bio_init);
  * bio_alloc_bioset will try its own mempool to satisfy the allocation.
  * If %__GFP_WAIT is set then we will block on the internal pool waiting
  * for a &struct bio to become free.
- *
- * Note that the caller must set ->bi_destructor on successful return
- * of a bio, to do the appropriate freeing of the bio once the reference
- * count drops to zero.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
@@ -290,6 +286,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	bio = p + bs->front_pad;
 
 	bio_init(bio);
+	bio->bi_pool = bs;
 
 	if (unlikely(!nr_iovecs))
 		goto out_set;
@@ -316,11 +313,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-static void bio_fs_destructor(struct bio *bio)
-{
-	bio_free(bio, fs_bio_set);
-}
-
 /**
  * bio_alloc - allocate a new bio, memory pool backed
  * @gfp_mask: allocation mask to use
@@ -342,12 +334,7 @@ static void bio_fs_destructor(struct bio *bio)
  */
 struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
-	if (bio)
-		bio->bi_destructor = bio_fs_destructor;
-
-	return bio;
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 }
 EXPORT_SYMBOL(bio_alloc);
 
@@ -423,7 +410,16 @@ void bio_put(struct bio *bio)
 	if (atomic_dec_and_test(&bio->bi_cnt)) {
 		bio_disassociate_task(bio);
 		bio->bi_next = NULL;
-		bio->bi_destructor(bio);
+
+		/*
+		 * This if statement is temporary - bi_pool is replacing
+		 * bi_destructor, but bi_destructor will be taken out in another
+		 * patch.
+		 */
+		if (bio->bi_pool)
+			bio_free(bio, bio->bi_pool);
+		else
+			bio->bi_destructor(bio);
 	}
 }
 EXPORT_SYMBOL(bio_put);
@@ -474,12 +470,11 @@ EXPORT_SYMBOL(__bio_clone);
  */
 struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 {
-	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+	struct bio *b = bio_alloc(gfp_mask, bio->bi_max_vecs);
 
 	if (!b)
 		return NULL;
 
-	b->bi_destructor = bio_fs_destructor;
 	__bio_clone(b, bio);
 
 	if (bio_integrity(bio)) {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7b7ac9ccec7a..af9dd9d2efc4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -80,6 +80,9 @@ struct bio {
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	/* If bi_pool is non NULL, bi_destructor is not called */
+	struct bio_set *bi_pool;
+
 	bio_destructor_t *bi_destructor;	/* destructor */
 
 	/*