author	Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:50 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-22 17:22:54 -0400
commit	ae03bf639a5027d27270123f5f6e3ee6a412781d (patch)
tree	d705f41a188ad656b1f47f7952626a9f992e3b8f
parent	e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (diff)
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
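The conversion is mechanical: every direct read of a request_queue limit
field becomes a call to the corresponding queue_*() accessor added to
include/linux/blkdev.h below. A minimal sketch of the before/after pattern
(the helper example_check_limits() is hypothetical, not part of this patch):

	#include <linux/blkdev.h>

	/* Hypothetical caller, shown only to illustrate the pattern. */
	static int example_check_limits(struct request_queue *q, struct bio *bio)
	{
		/* Before: poking the queue directly */
		/*	if (bio_sectors(bio) > q->max_hw_sectors) */

		/* After: going through the accessor */
		if (bio_sectors(bio) > queue_max_hw_sectors(q))
			return -EIO;	/* bio exceeds the hardware limit */

		return 0;
	}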
-rw-r--r--	block/blk-barrier.c	8
-rw-r--r--	block/blk-core.c	16
-rw-r--r--	block/blk-map.c	4
-rw-r--r--	block/blk-merge.c	27
-rw-r--r--	block/blk-settings.c	15
-rw-r--r--	block/blk-sysfs.c	8
-rw-r--r--	block/compat_ioctl.c	2
-rw-r--r--	block/ioctl.c	10
-rw-r--r--	block/scsi_ioctl.c	8
-rw-r--r--	drivers/block/pktcdvd.c	6
-rw-r--r--	drivers/cdrom/cdrom.c	4
-rw-r--r--	drivers/md/dm-table.c	28
-rw-r--r--	drivers/md/linear.c	2
-rw-r--r--	drivers/md/multipath.c	4
-rw-r--r--	drivers/md/raid0.c	2
-rw-r--r--	drivers/md/raid1.c	4
-rw-r--r--	drivers/md/raid10.c	8
-rw-r--r--	drivers/md/raid5.c	4
-rw-r--r--	drivers/scsi/sg.c	15
-rw-r--r--	drivers/scsi/st.c	4
-rw-r--r--	drivers/usb/storage/scsiglue.c	4
-rw-r--r--	fs/bio.c	19
-rw-r--r--	include/linux/bio.h	2
-rw-r--r--	include/linux/blkdev.h	36
-rw-r--r--	mm/bounce.c	4
25 files changed, 147 insertions, 97 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d98054cdbd7..30022b4e2f63 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 
 	bio->bi_sector = sector;
 
-	if (nr_sects > q->max_hw_sectors) {
-		bio->bi_size = q->max_hw_sectors << 9;
-		nr_sects -= q->max_hw_sectors;
-		sector += q->max_hw_sectors;
+	if (nr_sects > queue_max_hw_sectors(q)) {
+		bio->bi_size = queue_max_hw_sectors(q) << 9;
+		nr_sects -= queue_max_hw_sectors(q);
+		sector += queue_max_hw_sectors(q);
 	} else {
 		bio->bi_size = nr_sects << 9;
 		nr_sects = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 59c4af523112..7a4c40184a64 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(nr_sectors > q->max_hw_sectors)) {
+	if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
 		       bio_sectors(bio),
-		       q->max_hw_sectors);
+		       queue_max_hw_sectors(q));
 		goto end_io;
 	}
 
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > q->max_phys_segments ||
-	    rq->nr_phys_segments > q->max_hw_segments) {
+	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
diff --git a/block/blk-map.c b/block/blk-map.c
index ef2492adca7e..9083cf0180cc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	struct bio *bio;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4974dd5767e5..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 15c3164537b8..0b32f984eed2 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+	else
+		q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	--q->max_hw_segments;
-	--q->max_phys_segments;
+	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
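Note that the new blk_queue_max_hw_sectors() setter above never stores a
value below BLK_DEF_MAX_SECTORS. As a hedged illustration of how a driver
might pair the setters with the accessors during queue setup
(example_init_queue() and the limit values are hypothetical, not from this
patch):

	/* Hypothetical queue setup; 1024/512/65536 are made-up limits. */
	static void example_init_queue(struct request_queue *q)
	{
		blk_queue_max_hw_sectors(q, 1024);	/* hardware ceiling, in sectors */
		blk_queue_max_sectors(q, 512);		/* per-request soft cap */
		blk_queue_max_segment_size(q, 65536);	/* bytes per segment */
	}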
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 13d38b7e4d0f..142a4acddd43 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-	int max_sectors_kb = q->max_sectors >> 1;
+	int max_sectors_kb = queue_max_sectors(q) >> 1;
 
 	return queue_var_show(max_sectors_kb, (page));
 }
@@ -109,7 +109,7 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
-		max_hw_sectors_kb = q->max_hw_sectors >> 1,
+		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
 			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	q->max_sectors = max_sectors_kb << 1;
+	blk_queue_max_sectors(q, max_sectors_kb << 1);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 9eaa1940273a..df18a156d011 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
-					 bdev_get_queue(bdev)->max_sectors);
+					 queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))
diff --git a/block/ioctl.c b/block/ioctl.c
index 7aa97f65da82..500e4c73cc52 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 		bio->bi_private = &wait;
 		bio->bi_sector = start;
 
-		if (len > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			len -= q->max_hw_sectors;
-			start += q->max_hw_sectors;
+		if (len > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			len -= queue_max_hw_sectors(q);
+			start += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = len << 9;
 			len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKSSZGET: /* get block device hardware sector size */
 		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
-		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a9670dd4b5de..5f8e798ede4e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 
 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
 
 	return put_user(val, p);
 }
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 
 	if (size < 0)
 		return -EINVAL;
-	if (size > (q->max_sectors << 9))
-		size = q->max_sectors << 9;
+	if (size > (queue_max_sectors(q) << 9))
+		size = queue_max_sectors(q) << 9;
 
 	q->sg_reserved_size = size;
 	return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->cmd_len > BLK_MAX_CDB)
 		return -EINVAL;
 
-	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 293f5858921d..d57f11759480 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  */
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
-	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+	if ((pd->settings.size << 9) / CD_FRAMESIZE
+	    <= queue_max_phys_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
-	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+	} else if ((pd->settings.size << 9) / PAGE_SIZE
+		   <= queue_max_phys_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef28..71d1b9bab70b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	nr = nframes;
 	if (cdi->cdda_method == CDDA_BPC_SINGLE)
 		nr = 1;
-	if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-		nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+	if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+		nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
 
 	len = nr * CD_FRAMESIZE_RAW;
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 65e2d9759857..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a1..64f1f3e046e0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe823..4ee31aa13c40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * merge_bvec_fn will be involved in multipath.)
 			 */
 			if (q->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(q) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be55..925507e7d673 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 		 */
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde1..e23758b4a34e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44b..750550c1166f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e71..7970dc8c522e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0fc2c0ae7691..9bd407fa98e4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
 	if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
-		sdp->sg_tablesize = min(q->max_hw_segments,
-					q->max_phys_segments);
+		sdp->sg_tablesize = min(queue_max_hw_segments(q),
+					queue_max_phys_segments(q));
 	}
 	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		if (val < 0)
 			return -EINVAL;
 		val = min_t(int, val,
-			    sdp->device->request_queue->max_sectors * 512);
+			    queue_max_sectors(sdp->device->request_queue) * 512);
 		if (val != sfp->reserve.bufflen) {
 			if (sg_res_in_use(sfp) || sfp->mmap_called)
 				return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		return 0;
 	case SG_GET_RESERVED_SIZE:
 		val = min_t(int, sfp->reserve.bufflen,
-			    sdp->device->request_queue->max_sectors * 512);
+			    queue_max_sectors(sdp->device->request_queue) * 512);
 		return put_user(val, ip);
 	case SG_SET_COMMAND_Q:
 		result = get_user(val, ip);
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 			return -ENODEV;
 		return scsi_ioctl(sdp->device, cmd_in, p);
 	case BLKSECTGET:
-		return put_user(sdp->device->request_queue->max_sectors * 512,
+		return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
 				ip);
 	case BLKTRACESETUP:
 		return blk_trace_setup(sdp->device->request_queue,
@@ -1377,7 +1377,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 	sdp->device = scsidp;
 	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
-	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+	sdp->sg_tablesize = min(queue_max_hw_segments(q),
+				queue_max_phys_segments(q));
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
 
@@ -2055,7 +2056,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 		sg_big_buff = def_reserved_size;
 
 	bufflen = min_t(int, sg_big_buff,
-			sdp->device->request_queue->max_sectors * 512);
+			queue_max_sectors(sdp->device->request_queue) * 512);
 	sg_build_reserve(sfp, bufflen);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
 				   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 8681b708344f..89bd438e1fe3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
 		return -ENODEV;
 	}
 
-	i = min(SDp->request_queue->max_hw_segments,
-		SDp->request_queue->max_phys_segments);
+	i = min(queue_max_hw_segments(SDp->request_queue),
+		queue_max_phys_segments(SDp->request_queue));
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
 	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b5860643..cfa26d56ce60 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
 
 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
 			max_sectors = PAGE_CACHE_SIZE >> 9;
-		if (sdev->request_queue->max_sectors > max_sectors)
+		if (queue_max_sectors(sdev->request_queue) > max_sectors)
 			blk_queue_max_sectors(sdev->request_queue,
 					      max_sectors);
 	} else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 
-	return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+	return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
 }
 
 /* Input routine for the sysfs max_sectors file */
diff --git a/fs/bio.c b/fs/bio.c
index 4445c3821730..ab423a1024ab 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -499,11 +499,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > q->max_phys_segments)
-		nr_pages = q->max_phys_segments;
-	if (nr_pages > q->max_hw_segments)
-		nr_pages = q->max_hw_segments;
+	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages > queue_max_phys_segments(q))
+		nr_pages = queue_max_phys_segments(q);
+	if (nr_pages > queue_max_hw_segments(q))
+		nr_pages = queue_max_hw_segments(q);
 
 	return nr_pages;
 }
@@ -562,8 +562,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 * make this too complex.
 		 */
 
-		while (bio->bi_phys_segments >= q->max_phys_segments
-		       || bio->bi_phys_segments >= q->max_hw_segments) {
+		while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+		       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 
 			if (retried_segments)
 				return 0;
@@ -634,7 +634,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+	return __bio_add_page(q, bio, page, len, offset,
+			      queue_max_hw_sectors(q));
 }
 
 /**
@@ -654,7 +655,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
 
 struct bio_map_data {
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d30ec6f30dd7..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 872b78b7a101..29b48f7b4ba8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -898,6 +898,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -988,6 +989,41 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->max_segment_size;
+}
+
 static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
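With the accessors in place, callers outside the block core can compute
derived limits without touching request_queue fields. A small hypothetical
helper as a sketch (example_max_xfer_bytes() is illustrative, not in the
tree):

	/* Hypothetical: largest single-request transfer on q, in bytes. */
	static inline unsigned int example_max_xfer_bytes(struct request_queue *q)
	{
		return queue_max_sectors(q) << 9;	/* 512-byte sectors */
	}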
diff --git a/mm/bounce.c b/mm/bounce.c
index e590272fe7a8..8dcd4315e01c 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -192,7 +192,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if (page_to_pfn(page) <= q->bounce_pfn)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		/*
@@ -284,7 +284,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->bounce_pfn >= blk_max_pfn)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {