Diffstat (limited to 'drivers')
-rw-r--r--   drivers/block/drbd/drbd_main.c       | 13
-rw-r--r--   drivers/block/osdblk.c               |  3
-rw-r--r--   drivers/block/pktcdvd.c              | 52
-rw-r--r--   drivers/char/raw.c                   |  2
-rw-r--r--   drivers/md/dm-crypt.c                | 16
-rw-r--r--   drivers/md/dm-io.c                   | 11
-rw-r--r--   drivers/md/dm.c                      | 74
-rw-r--r--   drivers/md/md.c                      | 44
-rw-r--r--   drivers/md/raid0.c                   |  1
-rw-r--r--   drivers/target/target_core_iblock.c  |  9
10 files changed, 37 insertions, 188 deletions
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f93a0320e952..f55683ad4ffa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -162,23 +162,12 @@ static const struct block_device_operations drbd_ops = {
 	.release = drbd_release,
 };
 
-static void bio_destructor_drbd(struct bio *bio)
-{
-	bio_free(bio, drbd_md_io_bio_set);
-}
-
 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 {
-	struct bio *bio;
-
 	if (!drbd_md_io_bio_set)
 		return bio_alloc(gfp_mask, 1);
 
-	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
-	if (!bio)
-		return NULL;
-	bio->bi_destructor = bio_destructor_drbd;
-	return bio;
+	return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
 }
 
 #ifdef __CHECKER__
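Note: the drbd hunk above relies on the block layer's new ownership rule in this series - a bio allocated from a bio_set records its pool, and bio_put() returns it there, so the driver no longer installs a bi_destructor. A minimal sketch of that pattern (helper name is hypothetical, not from the patch):

	#include <linux/bio.h>

	/* Illustrative only; example_md_io_alloc() is not part of the patch. */
	static struct bio *example_md_io_alloc(struct bio_set *bs, gfp_t gfp_mask)
	{
		if (!bs)				/* pool not set up yet */
			return bio_alloc(gfp_mask, 1);	/* fall back to the global pool */

		/* The bio remembers bs; bio_put() frees it back, no destructor needed. */
		return bio_alloc_bioset(gfp_mask, 1, bs);
	}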
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 87311ebac0db..1bbc681688e4 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -266,11 +266,10 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
 	struct bio *tmp, *new_chain = NULL, *tail = NULL;
 
 	while (old_chain) {
-		tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+		tmp = bio_clone_kmalloc(old_chain, gfpmask);
 		if (!tmp)
 			goto err_out;
 
-		__bio_clone(tmp, old_chain);
 		tmp->bi_bdev = NULL;
 		gfpmask &= ~__GFP_WAIT;
 		tmp->bi_next = NULL;
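For reference, bio_clone_kmalloc() used above folds the old two-step clone into one call; a rough sketch under that assumption (wrapper name hypothetical):

	/*
	 * Old pattern, as removed above:
	 *	tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
	 *	__bio_clone(tmp, old_chain);
	 * New pattern: allocate the kmalloc-backed bio and copy the bvecs in one step.
	 */
	static struct bio *example_clone(struct bio *src, gfp_t gfp)
	{
		return bio_clone_kmalloc(src, gfp);
	}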
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ba66e4445f41..2e7de7a59bfc 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -522,38 +522,6 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
 	}
 }
 
-static void pkt_bio_destructor(struct bio *bio)
-{
-	kfree(bio->bi_io_vec);
-	kfree(bio);
-}
-
-static struct bio *pkt_bio_alloc(int nr_iovecs)
-{
-	struct bio_vec *bvl = NULL;
-	struct bio *bio;
-
-	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
-	if (!bio)
-		goto no_bio;
-	bio_init(bio);
-
-	bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
-	if (!bvl)
-		goto no_bvl;
-
-	bio->bi_max_vecs = nr_iovecs;
-	bio->bi_io_vec = bvl;
-	bio->bi_destructor = pkt_bio_destructor;
-
-	return bio;
-
-no_bvl:
-	kfree(bio);
-no_bio:
-	return NULL;
-}
-
 /*
  * Allocate a packet_data struct
  */
@@ -567,7 +535,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
 		goto no_pkt;
 
 	pkt->frames = frames;
-	pkt->w_bio = pkt_bio_alloc(frames);
+	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
 	if (!pkt->w_bio)
 		goto no_bio;
 
@@ -581,9 +549,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
 	bio_list_init(&pkt->orig_bios);
 
 	for (i = 0; i < frames; i++) {
-		struct bio *bio = pkt_bio_alloc(1);
+		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
 		if (!bio)
 			goto no_rd_bio;
+
 		pkt->r_bios[i] = bio;
 	}
 
@@ -1111,21 +1080,17 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	 * Schedule reads for missing parts of the packet.
 	 */
 	for (f = 0; f < pkt->frames; f++) {
-		struct bio_vec *vec;
-
 		int p, offset;
+
 		if (written[f])
 			continue;
+
 		bio = pkt->r_bios[f];
-		vec = bio->bi_io_vec;
-		bio_init(bio);
-		bio->bi_max_vecs = 1;
+		bio_reset(bio);
 		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
-		bio->bi_io_vec = vec;
-		bio->bi_destructor = pkt_bio_destructor;
 
 		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
 		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1418,14 +1383,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	}
 
 	/* Start the write request */
-	bio_init(pkt->w_bio);
-	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
+	bio_reset(pkt->w_bio);
 	pkt->w_bio->bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
-	pkt->w_bio->bi_io_vec = bvec;
-	pkt->w_bio->bi_destructor = pkt_bio_destructor;
 	for (f = 0; f < pkt->frames; f++)
 		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
 			BUG();
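The pktcdvd hunks above lean on bio_reset(), which clears a bio for reuse while keeping its allocation (the bvec array, bi_max_vecs and the owning pool), so the old dance of saving bi_io_vec around bio_init() is no longer needed. A hedged sketch of the reuse pattern (function name hypothetical, not from the patch):

	/* Illustrative only: re-arm an already-allocated bio for another request. */
	static void example_rearm_bio(struct bio *bio, struct block_device *bdev,
				      sector_t sector, bio_end_io_t *end_io,
				      void *private)
	{
		bio_reset(bio);			/* keeps bi_io_vec / bi_max_vecs */
		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = private;
	}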
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..0bb207eaef2f 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
 	.read		= do_sync_read,
-	.aio_read	= generic_file_aio_read,
+	.aio_read	= blkdev_aio_read,
 	.write		= do_sync_write,
 	.aio_write	= blkdev_aio_write,
 	.fsync		= blkdev_fsync,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 664743d6a6cd..bbf459bca61d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -798,14 +798,6 @@ static int crypt_convert(struct crypt_config *cc,
 	return 0;
 }
 
-static void dm_crypt_bio_destructor(struct bio *bio)
-{
-	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->cc;
-
-	bio_free(bio, cc->bs);
-}
-
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
@@ -974,7 +966,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_end_io = crypt_endio;
 	clone->bi_bdev = cc->dev->bdev;
 	clone->bi_rw = io->base_bio->bi_rw;
-	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -988,19 +979,14 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
 	if (!clone)
 		return 1;
 
 	crypt_inc_pending(io);
 
 	clone_init(io, clone);
-	clone->bi_idx = 0;
-	clone->bi_vcnt = bio_segments(base_bio);
-	clone->bi_size = base_bio->bi_size;
 	clone->bi_sector = cc->start + io->sector;
-	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
-	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
 	return 0;
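bio_clone_bioset(), used in the read path above, allocates the clone from the given bio_set and copies the bvec table, size and index itself, which is why the manual memcpy() and the bi_idx/bi_vcnt/bi_size bookkeeping could be dropped. A sketch under those assumptions (names hypothetical):

	static struct bio *example_clone_for_read(struct bio *base, struct bio_set *bs,
						  gfp_t gfp, sector_t start_sector)
	{
		struct bio *clone = bio_clone_bioset(base, gfp, bs);

		if (!clone)
			return NULL;

		/* Only the target-specific fields still need to be filled in. */
		clone->bi_sector = start_sector;
		return clone;
	}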
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea5dd289fe2a..1c46f97d6664 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -249,16 +249,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
 	dp->context_ptr = data;
 }
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	unsigned region;
-	struct io *io;
-
-	retrieve_io_and_region_from_bio(bio, &io, &region);
-
-	bio_free(bio, io->client->bios);
-}
-
 /*
  * Functions for getting the pages from kernel memory.
  */
@@ -317,7 +307,6 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	bio->bi_sector = where->sector + (where->count - remaining);
 	bio->bi_bdev = where->bdev;
 	bio->bi_end_io = endio;
-	bio->bi_destructor = dm_bio_destructor;
 	store_io_and_region_in_bio(bio, io, region);
 
 	if (rw & REQ_DISCARD) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 67ffa391edcf..66ceaff6455c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -86,12 +86,17 @@ struct dm_rq_target_io {
 };
 
 /*
- * For request-based dm.
- * One of these is allocated per bio.
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
  */
 struct dm_rq_clone_bio_info {
 	struct bio *orig;
 	struct dm_rq_target_io *tio;
+	struct bio clone;
 };
 
 union map_info *dm_get_mapinfo(struct bio *bio)
@@ -211,6 +216,11 @@ struct dm_md_mempools {
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
 static struct kmem_cache *_rq_tio_cache;
+
+/*
+ * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
+ * still used for _io_cache, I'm leaving this for a later cleanup
+ */
 static struct kmem_cache *_rq_bio_info_cache;
 
 static int __init local_init(void)
@@ -467,16 +477,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
 	mempool_free(tio, tio->md->tio_pool);
 }
 
-static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
-{
-	return mempool_alloc(md->io_pool, GFP_ATOMIC);
-}
-
-static void free_bio_info(struct dm_rq_clone_bio_info *info)
-{
-	mempool_free(info, info->tio->md->io_pool);
-}
-
 static int md_in_flight(struct mapped_device *md)
 {
 	return atomic_read(&md->pending[READ]) +
@@ -681,11 +681,6 @@ static void clone_endio(struct bio *bio, int error)
 		}
 	}
 
-	/*
-	 * Store md for cleanup instead of tio which is about to get freed.
-	 */
-	bio->bi_private = md->bs;
-
 	free_tio(md, tio);
 	bio_put(bio);
 	dec_pending(io, error);
@@ -1036,11 +1031,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		/* error the io and bail out, or requeue it if needed */
 		md = tio->io->md;
 		dec_pending(tio->io, r);
-		/*
-		 * Store bio_set for cleanup.
-		 */
-		clone->bi_end_io = NULL;
-		clone->bi_private = md->bs;
 		bio_put(clone);
 		free_tio(md, tio);
 	} else if (r) {
@@ -1059,13 +1049,6 @@ struct clone_info {
 	unsigned short idx;
 };
 
-static void dm_bio_destructor(struct bio *bio)
-{
-	struct bio_set *bs = bio->bi_private;
-
-	bio_free(bio, bs);
-}
-
 /*
  * Creates a little bio that just does part of a bvec.
  */
@@ -1077,7 +1060,6 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
-	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
 	clone->bi_sector = sector;
@@ -1090,7 +1072,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
@@ -1109,7 +1091,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
 	clone->bi_vcnt = idx + bv_count;
@@ -1117,7 +1098,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		bio_integrity_clone(clone, bio, GFP_NOIO);
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
@@ -1152,9 +1133,8 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
-	__bio_clone(clone, ci->bio);
-	clone->bi_destructor = dm_bio_destructor;
+	clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs);
+
 	if (len) {
 		clone->bi_sector = ci->sector;
 		clone->bi_size = to_bytes(len);
@@ -1484,30 +1464,17 @@ void dm_dispatch_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(dm_dispatch_request);
 
-static void dm_rq_bio_destructor(struct bio *bio)
-{
-	struct dm_rq_clone_bio_info *info = bio->bi_private;
-	struct mapped_device *md = info->tio->md;
-
-	free_bio_info(info);
-	bio_free(bio, md->bs);
-}
-
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 				 void *data)
 {
 	struct dm_rq_target_io *tio = data;
-	struct mapped_device *md = tio->md;
-	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
-
-	if (!info)
-		return -ENOMEM;
+	struct dm_rq_clone_bio_info *info =
+		container_of(bio, struct dm_rq_clone_bio_info, clone);
 
 	info->orig = bio_orig;
 	info->tio = tio;
 	bio->bi_end_io = end_clone_bio;
 	bio->bi_private = info;
-	bio->bi_destructor = dm_rq_bio_destructor;
 
 	return 0;
 }
@@ -2771,7 +2738,10 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 	if (!pools->tio_pool)
 		goto free_io_pool_and_out;
 
-	pools->bs = bioset_create(pool_size, 0);
+	pools->bs = (type == DM_TYPE_BIO_BASED) ?
+		bioset_create(pool_size, 0) :
+		bioset_create(pool_size,
+			      offsetof(struct dm_rq_clone_bio_info, clone));
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
 
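The request-based dm changes above are built on the bioset front_pad mechanism: the pool is created with a front_pad of offsetof(struct dm_rq_clone_bio_info, clone), so each bio it hands out sits at the tail of a dm_rq_clone_bio_info, and the per-bio data is reached with container_of() instead of a second mempool allocation. A minimal sketch of the idea with hypothetical names:

	/* Illustrative wrapper struct; not the dm definition. */
	struct example_bio_info {
		void		*driver_data;
		struct bio	clone;		/* must be the last member */
	};

	/* Create the pool so each bio is preceded by the wrapper struct. */
	static struct bio_set *example_create_pool(unsigned pool_size)
	{
		return bioset_create(pool_size,
				     offsetof(struct example_bio_info, clone));
	}

	/* Recover the wrapper from a bio allocated out of that pool. */
	static struct example_bio_info *example_info(struct bio *bio)
	{
		return container_of(bio, struct example_bio_info, clone);
	}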
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 308e87b417e0..95c88012a3b9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -155,32 +155,17 @@ static int start_readonly;
  * like bio_clone, but with a local bio set
  */
 
-static void mddev_bio_destructor(struct bio *bio)
-{
-	struct mddev *mddev, **mddevp;
-
-	mddevp = (void*)bio;
-	mddev = mddevp[-1];
-
-	bio_free(bio, mddev->bio_set);
-}
-
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 			    struct mddev *mddev)
 {
 	struct bio *b;
-	struct mddev **mddevp;
 
 	if (!mddev || !mddev->bio_set)
 		return bio_alloc(gfp_mask, nr_iovecs);
 
-	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
-			     mddev->bio_set);
+	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
 	if (!b)
 		return NULL;
-	mddevp = (void*)b;
-	mddevp[-1] = mddev;
-	b->bi_destructor = mddev_bio_destructor;
 	return b;
 }
 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
@@ -188,32 +173,10 @@ EXPORT_SYMBOL_GPL(bio_alloc_mddev);
 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 			    struct mddev *mddev)
 {
-	struct bio *b;
-	struct mddev **mddevp;
-
 	if (!mddev || !mddev->bio_set)
 		return bio_clone(bio, gfp_mask);
 
-	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
-			     mddev->bio_set);
-	if (!b)
-		return NULL;
-	mddevp = (void*)b;
-	mddevp[-1] = mddev;
-	b->bi_destructor = mddev_bio_destructor;
-	__bio_clone(b, bio);
-	if (bio_integrity(bio)) {
-		int ret;
-
-		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
-
-		if (ret < 0) {
-			bio_put(b);
-			return NULL;
-		}
-	}
-
-	return b;
+	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
 }
 EXPORT_SYMBOL_GPL(bio_clone_mddev);
 
@@ -5006,8 +4969,7 @@ int md_run(struct mddev *mddev)
 	}
 
 	if (mddev->bio_set == NULL)
-		mddev->bio_set = bioset_create(BIO_POOL_SIZE,
-					       sizeof(struct mddev *));
+		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
 
 	spin_lock(&pers_lock);
 	pers = find_pers(mddev->level, mddev->clevel);
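With the bioset now responsible for freeing, md no longer needs the sizeof(struct mddev *) front pad or the mddev pointer stashed just before the bio, and bio_clone_mddev() collapses to bio_clone_bioset(), which in this series also appears to take care of cloning any integrity payload. A sketch of the simplified helper (hypothetical name):

	static struct bio *example_clone_with_pool(struct bio *bio, gfp_t gfp,
						   struct bio_set *bs)
	{
		if (!bs)
			return bio_clone(bio, gfp);	/* no private pool available */

		return bio_clone_bioset(bio, gfp, bs);	/* bvecs + integrity handled */
	}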
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index de63a1fc3737..a9e4fa95dfaa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -422,6 +422,7 @@ static int raid0_run(struct mddev *mddev)
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 29408d46a6d9..57d7674c5013 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -553,14 +553,6 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
 	kfree(ibr);
 }
 
-static void iblock_bio_destructor(struct bio *bio)
-{
-	struct se_cmd *cmd = bio->bi_private;
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-
-	bio_free(bio, ib_dev->ibd_bio_set);
-}
-
 static struct bio *
 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
@@ -582,7 +574,6 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = cmd;
-	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
 	return bio;