author     Jens Axboe <jens.axboe@oracle.com>    2010-02-28 13:41:12 -0500
committer  Jens Axboe <jens.axboe@oracle.com>    2010-02-28 13:41:12 -0500
commit     6fc2de06ef1e691d032aa572478c905b7495a274 (patch)
tree       778df1440b045d67c997d7bd79ee3c6c11f57638
parent     58c24a61614f5da290068e47fc5ec65370eb61dd (diff)
parent     8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (diff)
Merge branch 'for-2.6.34-next' into for-2.6.34
-rw-r--r--  arch/um/drivers/ubd_kern.c | 4
-rw-r--r--  block/blk-core.c | 3
-rw-r--r--  block/blk-merge.c | 8
-rw-r--r--  block/blk-settings.c | 105
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/block/DAC960.c | 4
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/cciss.c | 7
-rw-r--r--  drivers/block/cpqarray.c | 5
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 5
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/hd.c | 2
-rw-r--r--  drivers/block/mg_disk.c | 2
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/paride/pf.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 8
-rw-r--r--  drivers/block/ps3disk.c | 5
-rw-r--r--  drivers/block/ps3vram.c | 7
-rw-r--r--  drivers/block/sunvdc.c | 5
-rw-r--r--  drivers/block/sx8.c | 3
-rw-r--r--  drivers/block/ub.c | 5
-rw-r--r--  drivers/block/viodasd.c | 5
-rw-r--r--  drivers/block/xd.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 5
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/cdrom/viocd.c | 5
-rw-r--r--  drivers/firewire/sbp2.c | 2
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-floppy.c | 4
-rw-r--r--  drivers/ide/ide-probe.c | 5
-rw-r--r--  drivers/ieee1394/sbp2.c | 2
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/multipath.c | 4
-rw-r--r--  drivers/md/raid0.c | 4
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 4
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/memstick/core/mspro_block.c | 5
-rw-r--r--  drivers/message/i2o/i2o_block.c | 5
-rw-r--r--  drivers/mmc/card/queue.c | 10
-rw-r--r--  drivers/s390/block/dasd.c | 5
-rw-r--r--  drivers/s390/char/tape_block.c | 5
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/ipr.c | 2
-rw-r--r--  drivers/scsi/pmcraid.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/sg.c | 6
-rw-r--r--  drivers/scsi/st.c | 3
-rw-r--r--  drivers/staging/hv/blkvsc_drv.c | 5
-rw-r--r--  drivers/usb/storage/scsiglue.c | 6
-rw-r--r--  fs/bio.c | 9
-rw-r--r--  include/linux/blkdev.h | 45
-rw-r--r--  include/linux/i2o.h | 2
55 files changed, 153 insertions(+), 214 deletions(-)
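The driver changes below are all instances of one mechanical conversion: blk_queue_max_sectors() becomes blk_queue_max_hw_sectors(), and the blk_queue_max_phys_segments()/blk_queue_max_hw_segments() pair collapses into a single blk_queue_max_segments(). As a quick orientation, a minimal before/after sketch; the helper signatures are the ones introduced in block/blk-settings.c below, but the queue setup function and the MYDRV_* constants are hypothetical:

    #include <linux/blkdev.h>

    /* Hypothetical driver init, for illustration only. */
    static void mydrv_setup_limits(struct request_queue *q)
    {
            /* was: blk_queue_max_sectors(q, MYDRV_MAX_HW_SECTORS); */
            blk_queue_max_hw_sectors(q, MYDRV_MAX_HW_SECTORS);

            /* was: blk_queue_max_phys_segments(q, MYDRV_SG_ENTRIES);
             *      blk_queue_max_hw_segments(q, MYDRV_SG_ENTRIES); */
            blk_queue_max_segments(q, MYDRV_SG_ENTRIES);
    }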
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 5ff554677f40..c1ff6903b622 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
 	ubd_dev->fd = fd;
 
 	if(ubd_dev->cow.file != NULL){
-		blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
+		blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
 
 		err = -ENOMEM;
 		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
@@ -849,7 +849,7 @@ static int ubd_add(int n, char **error_out)
 	}
 	ubd_dev->queue->queuedata = ubd_dev;
 
-	blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if(err){
 		*error_out = "Failed to register device";
diff --git a/block/blk-core.c b/block/blk-core.c
index 36c0deebc2dc..9fe174dc74d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1614,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
+	if (rq->nr_phys_segments > queue_max_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..5e7dc9973458 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > queue_max_phys_segments(q))
-		return 0;
-
-	if (total_phys_segments > queue_max_hw_segments(q))
+	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
 	/* Merge is OK... */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 78549c723783..31e7a9375c13 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
@@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
- * @max_sectors:  max sectors in the usual 512b unit
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
+ *    Enables a low level driver to set a hard upper limit,
+ *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+ *    the device driver based upon the combined capabilities of I/O
+ *    controller and storage device.
+ *
+ *    max_sectors is a soft limit imposed by the block layer for
+ *    filesystem type requests.  This value can be overridden on a
+ *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_sectors);
+		       __func__, max_hw_sectors);
 	}
 
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
-	else {
-		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
-		q->limits.max_hw_sectors = max_sectors;
-	}
-}
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
-{
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
-	else
-		q->limits.max_hw_sectors = max_sectors;
+	q->limits.max_hw_sectors = max_hw_sectors;
+	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+				      BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
  **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
@@ -536,11 +504,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
@@ -744,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
  * does is adjust the queue so that the buf is always appended
  * silently to the scatterlist.
  *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
  */
 int blk_queue_dma_drain(struct request_queue *q,
 			dma_drain_needed_fn *dma_drain_needed,
 			void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
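Worth noting on the rewritten blk_queue_max_hw_sectors() above: the hardware limit is now stored verbatim and the filesystem soft limit is derived from it, rather than the other way around. A hedged illustration, assuming BLK_DEF_MAX_SECTORS is 1024 (its value in this tree's blkdev.h) and a hypothetical, fully set-up queue q:

    blk_queue_max_hw_sectors(q, 2048);
    /* q->limits.max_hw_sectors == 2048: hard cap from the driver */
    /* q->limits.max_sectors    == 1024: min(2048, BLK_DEF_MAX_SECTORS),
     * raisable up to max_hw_sectors via
     * /sys/block/<device>/queue/max_sectors_kb */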
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d096fbcbc771..bea003a24d27 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		dev->flags |= ATA_DFLAG_NO_UNLOAD;
 
 	/* configure max sectors */
-	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+	blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
 
 	if (dev->class == ATA_DEV_ATAPI) {
 		struct request_queue *q = sdev->request_queue;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0c82d335c55d..684fe04dbbb7 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	}
 
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 		(unsigned long long)*ap->host->dev->dma_mask,
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 7412b5d4f5f3..459f1bc25a7b 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,8 +2534,8 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
   RequestQueue->queuedata = Controller;
   blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-  blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-  blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+  blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+  blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
   disk->queue = RequestQueue;
   sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
   disk->major = MajorNumber;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4f688434daf1..c6ddeacb77fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
-	blk_queue_max_sectors(brd->brd_queue, 1024);
+	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 86acdca5d0ce..a29e69418a03 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1797,12 +1797,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
+	blk_queue_max_segments(disk->queue, h->maxsgentries);
 
-	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
-
-	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
+	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
 	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 6422651ec364..91d11631cec9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 	blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(q, SG_MAX);
+	blk_queue_max_segments(q, SG_MAX);
 
-	/* This is a driver limit and could be eliminated. */
-	blk_queue_max_phys_segments(q, SG_MAX);
-
 	init_timer(&hba[i]->timer);
 	hba[i]->timer.expires = jiffies + IDA_TIMER;
 	hba[i]->timer.data = (unsigned long)hba[i];
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1292e0620663..4df3b40b1057 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -709,9 +709,8 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
 
 	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
-	blk_queue_max_sectors(q, max_seg_s >> 9);
-	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
+	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
 	blk_queue_max_segment_size(q, max_seg_s);
 	blk_queue_logical_block_size(q, 512);
 	blk_queue_segment_boundary(q, PAGE_SIZE-1);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3266b4f65daa..b9b117059b62 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4234,7 +4234,7 @@ static int __init floppy_init(void)
 		err = -ENOMEM;
 		goto out_unreg_driver;
 	}
-	blk_queue_max_sectors(floppy_queue, 64);
+	blk_queue_max_hw_sectors(floppy_queue, 64);
 
 	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
 			    floppy_find, NULL, NULL);
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index d5cdce08ffd2..5116c65c07cb 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -719,7 +719,7 @@ static int __init hd_init(void)
 		return -ENOMEM;
 	}
 
-	blk_queue_max_sectors(hd_queue, 255);
+	blk_queue_max_hw_sectors(hd_queue, 255);
 	init_timer(&device_timer);
 	device_timer.function = hd_times_out;
 	blk_queue_logical_block_size(hd_queue, 512);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 02b2583df7fc..5416c9a606e4 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev)
 			__func__, __LINE__);
 		goto probe_err_6;
 	}
-	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
+	blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
 	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
 
 	init_timer(&host->timer);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 569e39e8f114..e712cd51af15 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -906,7 +906,7 @@ static int __init pd_init(void)
 	if (!pd_queue)
 		goto out1;
 
-	blk_queue_max_sectors(pd_queue, cluster);
+	blk_queue_max_hw_sectors(pd_queue, cluster);
 
 	if (register_blkdev(major, name))
 		goto out2;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ea54ea393553..ddb4f9abd480 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -956,8 +956,7 @@ static int __init pf_init(void)
 		return -ENOMEM;
 	}
 
-	blk_queue_max_phys_segments(pf_queue, cluster);
-	blk_queue_max_hw_segments(pf_queue, cluster);
+	blk_queue_max_segments(pf_queue, cluster);
 
 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
 		struct gendisk *disk = pf->disk;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7cd2973ebb7b..b72935b8f203 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -950,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE
-	    <= queue_max_phys_segments(q)) {
+	    <= queue_max_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
 	} else if ((pd->settings.size << 9) / PAGE_SIZE
-		   <= queue_max_phys_segments(q)) {
+		   <= queue_max_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations
@@ -2312,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 		 * even if the size is a multiple of the packet size.
 		 */
 		spin_lock_irq(q->queue_lock);
-		blk_queue_max_sectors(q, pd->settings.size);
+		blk_queue_max_hw_sectors(q, pd->settings.size);
 		spin_unlock_irq(q->queue_lock);
 		set_bit(PACKET_WRITABLE, &pd->flags);
 	} else {
@@ -2613,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 
 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
-	blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
 	blk_queue_merge_bvec(q, pkt_merge_bvec);
 	q->queuedata = pd;
 }
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 03a130dca8ab..bc95469d33c1 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 
 	blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
 
-	blk_queue_max_sectors(queue, dev->bounce_size >> 9);
+	blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
 	blk_queue_segment_boundary(queue, -1UL);
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_logical_block_size(queue, dev->blk_size);
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			  ps3disk_prepare_flush);
 
-	blk_queue_max_phys_segments(queue, -1);
-	blk_queue_max_hw_segments(queue, -1);
+	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
 
 	gendisk = alloc_disk(PS3DISK_MINORS);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1fb6c3135fc8..83ebb390b164 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -751,10 +751,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
 	priv->queue = queue;
 	queue->queuedata = dev;
 	blk_queue_make_request(queue, ps3vram_make_request);
-	blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
-	blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE);
-	blk_queue_max_sectors(queue, SAFE_MAX_SECTORS);
+	blk_queue_max_segments(queue, BLK_MAX_HW_SEGMENTS);
+	blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
+	blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
 
 	gendisk = alloc_disk(1);
 	if (!gendisk) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 411f064760b4..48e8fee9f2d4 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -691,9 +691,8 @@ static int probe_disk(struct vdc_port *port)
 
 	port->disk = g;
 
-	blk_queue_max_hw_segments(q, port->ring_cookies);
-	blk_queue_max_phys_segments(q, port->ring_cookies);
-	blk_queue_max_sectors(q, port->max_xfer_size);
+	blk_queue_max_segments(q, port->ring_cookies);
+	blk_queue_max_hw_sectors(q, port->max_xfer_size);
 	g->major = vdc_major;
 	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
 	strcpy(g->disk_name, port->disk_name);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 7bd7b2f83116..b70f0fca9a42 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
 			break;
 		}
 		disk->queue = q;
-		blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-		blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+		blk_queue_max_segments(q, CARM_MAX_REQ_SG);
 		blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
 
 		q->queuedata = port;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index d86d1357ccef..2e889838e819 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2320,10 +2320,9 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->queue = q;
 
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+	blk_queue_max_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
-	blk_queue_max_sectors(q, UB_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
 	blk_queue_logical_block_size(q, lun->capacity.bsize);
 
 	lun->disk = disk;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index a8c8b56b275e..c12b31362ac6 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -471,9 +471,8 @@ retry:
 	}
 
 	d->disk = g;
-	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
-	blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
+	blk_queue_max_segments(q, VIOMAXBLOCKDMA);
+	blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
 	g->major = VIODASD_MAJOR;
 	g->first_minor = dev_no << PARTITION_SHIFT;
 	if (dev_no >= 26)
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index d1fd032e7514..1a325fb05c92 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -242,7 +242,7 @@ static int __init xd_init(void)
 	}
 
 	/* xd_maxsectors depends on controller - so set after detection */
-	blk_queue_max_sectors(xd_queue, xd_maxsectors);
+	blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
 
 	for (i = 0; i < xd_drives; i++)
 		add_disk(xd_gendisk[i]);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a84702d1a35e..9c09694b2520 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -346,15 +346,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
 	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_max_sectors(rq, 512);
+	blk_queue_max_hw_sectors(rq, 512);
 
 	/* Each segment in a request is up to an aligned page in size. */
 	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
 	blk_queue_max_segment_size(rq, PAGE_SIZE);
 
 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index e789e6c9a422..03c71f7698cb 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void)
 {
 	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
-	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+	blk_queue_max_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
 	blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
 	gd.disk->queue = gd.gdrom_rq;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 57ca69e0ac55..cc435be0bc13 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -616,9 +616,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	gendisk->first_minor = deviceno;
 	strncpy(gendisk->disk_name, c->name,
 			sizeof(gendisk->disk_name));
-	blk_queue_max_hw_segments(q, 1);
-	blk_queue_max_phys_segments(q, 1);
-	blk_queue_max_sectors(q, 4096 / 512);
+	blk_queue_max_segments(q, 1);
+	blk_queue_max_hw_sectors(q, 4096 / 512);
 	gendisk->queue = q;
 	gendisk->fops = &viocd_fops;
 	gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index d485cdd8cbac..70fef40cd22f 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1571,7 +1571,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 		sdev->start_stop_pwr_cond = 1;
 
 	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+		blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
 
 	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7f878017b736..3b128dce9c3a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static void ide_disk_setup(ide_drive_t *drive)
 		if (max_s > hwif->rqsize)
 			max_s = hwif->rqsize;
 
-		blk_queue_max_sectors(q, max_s);
+		blk_queue_max_hw_sectors(q, max_s);
 	}
 
 	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index fefbdfc8db06..efd907623469 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -486,7 +486,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
 		drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
 		/* This value will be visible in the /proc/ide/hdx/settings */
 		drive->pc_delay = IDEFLOPPY_PC_DELAY;
-		blk_queue_max_sectors(drive->queue, 64);
+		blk_queue_max_hw_sectors(drive->queue, 64);
 	}
 
 	/*
@@ -494,7 +494,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
 	 * nasty clicking noises without it, so please don't remove this.
 	 */
 	if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
-		blk_queue_max_sectors(drive->queue, 64);
+		blk_queue_max_hw_sectors(drive->queue, 64);
 		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
 		/* IOMEGA Clik! drives do not support lock/unlock commands */
 		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4d76ba473097..f8c1ae6ad74c 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -774,7 +774,7 @@ static int ide_init_queue(ide_drive_t *drive)
 
 	if (hwif->rqsize < max_sectors)
 		max_sectors = hwif->rqsize;
-	blk_queue_max_sectors(q, max_sectors);
+	blk_queue_max_hw_sectors(q, max_sectors);
 
 #ifdef CONFIG_PCI
 	/* When we have an IOMMU, we may have a problem where pci_map_sg()
@@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive)
 		max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */
 
-	blk_queue_max_hw_segments(q, max_sg_entries);
-	blk_queue_max_phys_segments(q, max_sg_entries);
+	blk_queue_max_segments(q, max_sg_entries);
 
 	/* assign drive queue */
 	drive->queue = q;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f199896c4113..c88696a6cf8a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2020,7 +2020,7 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
 	if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
 		sdev->start_stop_pwr_cond = 1;
 	if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+		blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
 
 	blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
 	return 0;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00435bd20699..af2d39d603c7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -177,7 +177,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		conf->array_sectors += rdev->sectors;
 		cnt++;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 32a662fc55c9..4b323f45ad74 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -308,7 +308,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 		 */
 		if (q->merge_bvec_fn &&
 		    queue_max_sectors(q) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		conf->working_disks++;
 		mddev->degraded--;
@@ -478,7 +478,7 @@ static int multipath_run (mddev_t *mddev)
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!test_bit(Faulty, &rdev->flags))
 			conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 77605cdceaf1..a1f7147b757f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -182,7 +182,7 @@ static int create_strip_zones(mddev_t *mddev)
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
@@ -325,7 +325,7 @@ static int raid0_run(mddev_t *mddev)
 	}
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	ret = create_strip_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 859bd3ffe435..5a06122abd3b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1158,7 +1158,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		p->head_position = 0;
 		rdev->raid_disk = mirror;
@@ -2103,7 +2103,7 @@ static int run(mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 	}
 
 	mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d119b7b75e71..7584f9ab9bcf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1161,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		p->head_position = 0;
 		rdev->raid_disk = mirror;
@@ -2260,7 +2260,7 @@ static int run(mddev_t *mddev)
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
 		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ceb24afdc147..509c8f3dd9a5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
 	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > queue_max_phys_segments(q))
+	if (bi->bi_phys_segments > queue_max_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index bd83fa0a4970..972b87069d55 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1226,9 +1226,8 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
 
 	blk_queue_bounce_limit(msb->queue, limit);
-	blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-	blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-	blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+	blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
+	blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
 	blk_queue_max_segment_size(msb->queue,
 				   MSPRO_BLOCK_MAX_PAGES * msb->page_size);
 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e39986a78273..2658b1484a2c 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev)
 	queue = gd->queue;
 	queue->queuedata = i2o_blk_dev;
 
-	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
-	blk_queue_max_sectors(queue, max_sectors);
-	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+	blk_queue_max_hw_sectors(queue, max_sectors);
+	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
 
 	osm_debug("max sectors = %d\n", queue->max_sectors);
 	osm_debug("phys segments = %d\n", queue->max_phys_segments);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c5a7a855f4b1..381fe032caa1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -154,9 +154,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-		blk_queue_max_sectors(mq->queue, bouncesz / 512);
-		blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-		blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+		blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+		blk_queue_max_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, bouncesz);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -180,10 +179,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue,
+		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist) *
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c60..8831e9308d05 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2129,9 +2129,8 @@ static void dasd_setup_queue(struct dasd_block *block)
 
 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
 	max = block->base->discipline->max_blocks << block->s2b_shift;
-	blk_queue_max_sectors(block->request_queue, max);
-	blk_queue_max_phys_segments(block->request_queue, -1L);
-	blk_queue_max_hw_segments(block->request_queue, -1L);
+	blk_queue_max_hw_sectors(block->request_queue, max);
+	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 8d3d720737da..097da8ce6be6 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -222,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device)
 		goto cleanup_queue;
 
 	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
-	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+	blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+	blk_queue_max_segments(blkdat->request_queue, -1L);
 	blk_queue_max_segment_size(blkdat->request_queue, -1L);
 	blk_queue_segment_boundary(blkdat->request_queue, -1L);
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 87b536a97cb4..732f6d35b4a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 		if (tgt->service_parms.class3_parms[0] & 0x80000000)
 			rport->supported_classes |= FC_COS_CLASS3;
 		if (rport->rqst_q)
-			blk_queue_max_hw_segments(rport->rqst_q, 1);
+			blk_queue_max_segments(rport->rqst_q, 1);
 	} else
 		tgt_dbg(tgt, "rport add failed\n");
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	}
 
 	if (shost_to_fc_host(shost)->rqst_q)
-		blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
 	dev_set_drvdata(dev, vhost);
 	spin_lock(&ibmvfc_driver_lock);
 	list_add_tail(&vhost->queue, &ibmvfc_head);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9e52d16c7c39..032f0d0e6cb4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3674,7 +3674,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 	if (ipr_is_vset_device(res)) {
 		blk_queue_rq_timeout(sdev->request_queue,
 				     IPR_VSET_RW_TIMEOUT);
-		blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
+		blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
 	}
 	if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
 		sdev->allow_restart = 1;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b6f1ef954af1..9b1c1433c26b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -235,7 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
 		scsi_dev->allow_restart = 1;
 		blk_queue_rq_timeout(scsi_dev->request_queue,
 				     PMCRAID_VSET_IO_TIMEOUT);
-		blk_queue_max_sectors(scsi_dev->request_queue,
+		blk_queue_max_hw_sectors(scsi_dev->request_queue,
 				      PMCRAID_VSET_MAX_SECTORS);
 	}
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c6642423cc67..f8fbf47377ae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1624,10 +1624,10 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
-	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+					SCSI_MAX_SG_CHAIN_SEGMENTS));
 
-	blk_queue_max_sectors(q, shost->max_sectors);
+	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
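SCSI is the one caller where the two segment limits genuinely differed (the adapter's sg_tablesize versus the driver-side SCSI_MAX_SG_CHAIN_SEGMENTS cap), so the consolidation above takes the minimum instead of simply dropping one call. A hedged sketch of the effective value, with assumed numbers:

    /* Hypothetical adapter advertising 512 scatter-gather entries;
     * SCSI_MAX_SG_CHAIN_SEGMENTS assumed to be 2048 (sg chaining enabled). */
    unsigned short max_segs = min_t(unsigned short, 512, 2048);
    /* max_segs == 512: the smaller, hardware-imposed limit wins. */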
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a96880..5d9b5130d8c8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -879,7 +879,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 	 * broken RA4x00 Compaq Disk Array
 	 */
 	if (*bflags & BLIST_MAX_512)
-		blk_queue_max_sectors(sdev->request_queue, 512);
+		blk_queue_max_hw_sectors(sdev->request_queue, 512);
 
 	/*
 	 * Some devices may not want to have a start command automatically
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 040f751809ea..c996d98636f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp)
 	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
-		sdp->sg_tablesize = min(queue_max_hw_segments(q),
-					queue_max_phys_segments(q));
+		sdp->sg_tablesize = queue_max_segments(q);
 	}
 	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;
@@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 	sdp->device = scsidp;
 	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
-	sdp->sg_tablesize = min(queue_max_hw_segments(q),
-				queue_max_phys_segments(q));
+	sdp->sg_tablesize = queue_max_segments(q);
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d04ea9a6f673..f67d1a159aad 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev)
 		return -ENODEV;
 	}
 
-	i = min(queue_max_hw_segments(SDp->request_queue),
-		queue_max_phys_segments(SDp->request_queue));
+	i = queue_max_segments(SDp->request_queue);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
 	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 62b282844a53..45d908114d11 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device)
 	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
 
 	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-	blk_queue_max_phys_segments(blkdev->gd->queue,
-				    MAX_MULTIPAGE_BUFFER_COUNT);
-	blk_queue_max_hw_segments(blkdev->gd->queue,
-				  MAX_MULTIPAGE_BUFFER_COUNT);
+	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
 	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
 	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
 	blk_queue_dma_alignment(blkdev->gd->queue, 511);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e5e6df39e737..aadc16b5eed7 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -134,14 +134,14 @@ static int slave_configure(struct scsi_device *sdev)
 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
 			max_sectors = PAGE_CACHE_SIZE >> 9;
 		if (queue_max_sectors(sdev->request_queue) > max_sectors)
-			blk_queue_max_sectors(sdev->request_queue,
+			blk_queue_max_hw_sectors(sdev->request_queue,
 					      max_sectors);
 	} else if (sdev->type == TYPE_TAPE) {
 		/* Tapes need much higher max_sector limits, so just
 		 * raise it to the maximum possible (4 GB / 512) and
 		 * let the queue segment size sort out the real limit.
 		 */
-		blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
+		blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
 	}
 
 	/* Some USB host controllers can't do DMA; they have to use PIO.
@@ -495,7 +495,7 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
 	unsigned short ms;
 
 	if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
-		blk_queue_max_sectors(sdev->request_queue, ms);
+		blk_queue_max_hw_sectors(sdev->request_queue, ms);
 		return strlen(buf);
 	}
 	return -EINVAL;
diff --git a/fs/bio.c b/fs/bio.c
index 88094afc29ea..dc17afd672e3 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	int nr_pages;
 
 	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_phys_segments(q))
-		nr_pages = queue_max_phys_segments(q);
-	if (nr_pages > queue_max_hw_segments(q))
-		nr_pages = queue_max_hw_segments(q);
+	if (nr_pages > queue_max_segments(q))
+		nr_pages = queue_max_segments(q);
 
 	return nr_pages;
 }
@@ -575,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 * make this too complex.
 		 */
 
-		while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-		       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+		while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
 			if (retried_segments)
 				return 0;
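With a single segment limit, bio_get_nr_vecs() needs one clamp instead of two. An equivalent formulation of the new logic, assuming a min_t() style is acceptable here (the patch itself keeps the explicit if):

	/* Pages per bio, bounded by the queue's sector and segment limits. */
	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nr_pages = min_t(int, nr_pages, queue_max_segments(q));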
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2f17793048e7..ebd22dbed861 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -316,8 +316,7 @@ struct queue_limits {
 	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
-	unsigned short		max_hw_segments;
-	unsigned short		max_phys_segments;
+	unsigned short		max_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
@@ -921,10 +920,27 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+
+/* Temporary compatibility wrapper */
+static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
+{
+	blk_queue_max_hw_sectors(q, max);
+}
+
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+
+static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
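The inline wrappers keep unconverted callers compiling: the old setter names simply forward to the new single-limit interface, so the pairs below are now equivalent (hypothetical caller, values illustrative):

	blk_queue_max_sectors(q, 255);		/* forwards to blk_queue_max_hw_sectors(q, 255) */
	blk_queue_max_phys_segments(q, 64);	/* forwards to blk_queue_max_segments(q, 64) */
	blk_queue_max_hw_segments(q, 64);	/* forwards to blk_queue_max_segments(q, 64) */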
@@ -1017,11 +1033,15 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
 #define SAFE_MAX_SECTORS 255
-#define BLK_DEF_MAX_SECTORS 1024
-
 #define MAX_SEGMENT_SIZE 65536
 
-#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
+enum blk_default_limits {
+	BLK_MAX_SEGMENTS	= 128,
+	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 1024,
+	BLK_MAX_SEGMENT_SIZE	= 65536,
+	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
+};
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
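The old #defines stay for source compatibility, but the default limits now live in one enum. New code can take its defaults from there; a sketch of defaults-based queue setup (the usage is illustrative, the constants come from the hunk above):

	blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
	blk_queue_max_segment_size(q, BLK_MAX_SEGMENT_SIZE);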
@@ -1045,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
-{
-	return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-	return q->limits.max_phys_segments;
+	return q->limits.max_segments;
 }
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
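On the reader side, the min(hw, phys) dance seen in sg, st and bio collapses to one accessor. A sketch of the new pattern, mirroring the sg.c and st.c hunks (sg_tablesize and driver_max_segs are illustrative locals):

	unsigned short sg_tablesize = queue_max_segments(q);

	/* Optionally clamp against a driver or module limit, as st.c does. */
	if (sg_tablesize > driver_max_segs)
		sg_tablesize = driver_max_segs;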
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 4c4e57d1f19d..87018dc5527d 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -385,7 +385,7 @@
 /* defines for max_sectors and max_phys_segments */
 #define I2O_MAX_SECTORS		1024
 #define I2O_MAX_SECTORS_LIMITED	128
-#define I2O_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
+#define I2O_MAX_PHYS_SEGMENTS	BLK_MAX_SEGMENTS
 
 /*
  * Message structures