-rw-r--r--   block/blk-core.c                              |  19
-rw-r--r--   block/blk-merge.c                             | 159
-rw-r--r--   block/blk-mq.c                                |   4
-rw-r--r--   block/blk-sysfs.c                             |   3
-rw-r--r--   drivers/block/drbd/drbd_req.c                 |   2
-rw-r--r--   drivers/block/pktcdvd.c                       |   6
-rw-r--r--   drivers/block/ps3vram.c                       |   2
-rw-r--r--   drivers/block/rsxx/dev.c                      |   2
-rw-r--r--   drivers/block/umem.c                          |   2
-rw-r--r--   drivers/block/zram/zram_drv.c                 |   2
-rw-r--r--   drivers/md/dm.c                               |   2
-rw-r--r--   drivers/md/md.c                               |   2
-rw-r--r--   drivers/s390/block/dcssblk.c                  |   2
-rw-r--r--   drivers/s390/block/xpram.c                    |   2
-rw-r--r--   drivers/staging/lustre/lustre/llite/lloop.c   |   2
-rw-r--r--   include/linux/blkdev.h                        |   3
16 files changed, 192 insertions(+), 22 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d1796b54e97a..60912e983f16 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -643,6 +643,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (q->id < 0)
 		goto fail_q;
 
+	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	if (!q->bio_split)
+		goto fail_id;
+
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
@@ -651,7 +655,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err)
-		goto fail_id;
+		goto fail_split;
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
@@ -693,6 +697,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
+fail_split:
+	bioset_free(q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
@@ -1610,6 +1616,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct request *req;
 	unsigned int request_count = 0;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1832,15 +1840,6 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (likely(bio_is_rw(bio) &&
-		   nr_sectors > queue_max_hw_sectors(q))) {
-		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-		       bdevname(bio->bi_bdev, b),
-		       bio_sectors(bio),
-		       queue_max_hw_sectors(q));
-		goto end_io;
-	}
-
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
diff --git a/block/blk-merge.c b/block/blk-merge.c
index a455b9860143..d9c3a75e4a60 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,12 +9,158 @@
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
+	sector_t tmp;
+	unsigned split_sectors;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors -= max_discard_sectors % granularity;
+
+	if (unlikely(!max_discard_sectors)) {
+		/* XXX: warn */
+		return NULL;
+	}
+
+	if (bio_sectors(bio) <= max_discard_sectors)
+		return NULL;
+
+	split_sectors = max_discard_sectors;
+
+	/*
+	 * If the next starting sector would be misaligned, stop the discard at
+	 * the previous aligned sector.
+	 */
+	alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = sector_div(tmp, granularity);
+
+	if (split_sectors > tmp)
+		split_sectors -= tmp;
+
+	return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+					    struct bio *bio,
+					    struct bio_set *bs)
+{
+	if (!q->limits.max_write_same_sectors)
+		return NULL;
+
+	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+		return NULL;
+
+	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	struct bio *split;
+	struct bio_vec bv, bvprv;
+	struct bvec_iter iter;
+	unsigned seg_size = 0, nsegs = 0;
+	int prev = 0;
+
+	struct bvec_merge_data bvm = {
+		.bi_bdev	= bio->bi_bdev,
+		.bi_sector	= bio->bi_iter.bi_sector,
+		.bi_size	= 0,
+		.bi_rw		= bio->bi_rw,
+	};
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			goto split;
+
+		bvm.bi_size += bv.bv_len;
+
+		if (bvm.bi_size >> 9 > queue_max_sectors(q))
+			goto split;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+		    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
+			goto split;
+
+		if (prev && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+				goto new_segment;
+
+			seg_size += bv.bv_len;
+			bvprv = bv;
+			prev = 1;
+			continue;
+		}
+new_segment:
+		if (nsegs == queue_max_segments(q))
+			goto split;
+
+		nsegs++;
+		bvprv = bv;
+		prev = 1;
+		seg_size = bv.bv_len;
+	}
+
+	return NULL;
+split:
+	split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
+	split->bi_iter.bi_size -= iter.bi_size;
+	bio->bi_iter = iter;
+
+	if (bio_integrity(bio)) {
+		bio_integrity_advance(bio, split->bi_iter.bi_size);
+		bio_integrity_trim(split, 0, bio_sectors(split));
+	}
+
+	return split;
+}
+
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+		     struct bio_set *bs)
+{
+	struct bio *split;
+
+	if ((*bio)->bi_rw & REQ_DISCARD)
+		split = blk_bio_discard_split(q, *bio, bs);
+	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+		split = blk_bio_write_same_split(q, *bio, bs);
+	else
+		split = blk_bio_segment_split(q, *bio, q->bio_split);
+
+	if (split) {
+		bio_chain(split, *bio);
+		generic_make_request(*bio);
+		*bio = split;
+	}
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -36,7 +182,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
@@ -46,13 +191,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			if (no_sg_merge)
 				goto new_segment;
 
-			/*
-			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since
-			 * that might change with the bounce page.
-			 */
-			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-			if (!high && !highprv && cluster) {
+			if (prev && cluster) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -72,8 +211,8 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
+			prev = 1;
 			seg_size = bv.bv_len;
-			highprv = high;
 		}
 		bbio = bio;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 94559025c5e6..81edbd95bda8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1287,6 +1287,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return;
@@ -1372,6 +1374,8 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
 		return;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b1f34e463c0f..3e44a9da2a13 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -561,6 +561,9 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_trace_shutdown(q);
 
+	if (q->bio_split)
+		bioset_free(q->bio_split);
+
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9cb41166366e..923c857b395b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1499,6 +1499,8 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
 	unsigned long start_jif;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	start_jif = jiffies;
 
 	/*
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a7a259e031da..ee7ad5e44632 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2447,6 +2447,10 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	char b[BDEVNAME_SIZE];
 	struct bio *split;
 
+	blk_queue_bounce(q, &bio);
+
+	blk_queue_split(q, &bio, q->bio_split);
+
 	pd = q->queuedata;
 	if (!pd) {
 		pr_err("%s incorrect request queue\n",
@@ -2477,8 +2481,6 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		goto end_io;
 	}
 
-	blk_queue_bounce(q, &bio);
-
 	do {
 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
 		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 49b4706b162c..d89fcac59515 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -606,6 +606,8 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 
 	dev_dbg(&dev->core, "%s\n", __func__);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	spin_lock_irq(&priv->lock);
 	busy = !bio_list_empty(&priv->list);
 	bio_list_add(&priv->list, bio);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 63b9d2ffa8ee..3163e4cdc2cc 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -151,6 +151,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	struct rsxx_bio_meta *bio_meta;
 	int st = -EINVAL;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	might_sleep();
 
 	if (!card)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 3b3afd2ec5d6..04d65790a886 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -531,6 +531,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 		 (unsigned long long)bio->bi_iter.bi_sector,
 		 bio->bi_iter.bi_size);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
 	bio->bi_next = NULL;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 68c3d4800464..aec781acee9d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -900,6 +900,8 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 	if (unlikely(!zram_meta_get(zram)))
 		goto error;
 
+	blk_queue_split(queue, &bio, queue->bio_split);
+
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 			bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7f367fcace03..069f8d7e890e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1799,6 +1799,8 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 
 	map = dm_get_live_table(md, &srcu_idx);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
 	/* if we're suspended, we have to queue this io for later */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ac4381a6625c..e1d8723720cc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -257,6 +257,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int sectors;
 	int cpu;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 8bcb822b0bac..29ea2394c896 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -826,6 +826,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long source_addr;
 	unsigned long bytes_done;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	bytes_done = 0;
 	dev_info = bio->bi_bdev->bd_disk->private_data;
 	if (dev_info == NULL)
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 93856b9b6214..02871f1db562 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,6 +190,8 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long page_addr;
 	unsigned long bytes;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
 	    (bio->bi_iter.bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index cc00fd10fbcf..1e33d540b223 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -340,6 +340,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 	int rw = bio_rw(old_bio);
 	int inactive;
 
+	blk_queue_split(q, &old_bio, q->bio_split);
+
 	if (!lo)
 		goto err;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 243f29e779ec..ca778d9c7d81 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -463,6 +463,7 @@ struct request_queue {
 
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
+	struct bio_set		*bio_split;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -783,6 +784,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+			    struct bio_set *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
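
Any other bio-based driver follows the same conversion pattern shown in the hunks above: call blk_queue_split() at the top of its make_request function so an oversized bio is split against the queue limits (using the queue's q->bio_split bioset) before the driver handles it. A minimal sketch of that pattern follows; the driver and function names are invented for illustration, and only blk_queue_split() and q->bio_split come from this patch.

/*
 * Illustrative only: a hypothetical bio-based driver's make_request
 * handler using the interface added above.
 */
static void example_make_request(struct request_queue *q, struct bio *bio)
{
	/*
	 * May replace *bio with a front piece that fits the queue limits;
	 * the chained remainder is resubmitted via generic_make_request()
	 * inside blk_queue_split().
	 */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... driver-specific handling and completion of the (possibly smaller) bio ... */
}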