author     Martin K. Petersen <martin.petersen@oracle.com>    2010-02-26 00:20:39 -0500
committer  Jens Axboe <jens.axboe@oracle.com>                 2010-02-26 07:58:08 -0500
commit     8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (patch)
tree       c095d95af1aec0f9cee5975b1dcdc6bc1d17d401 /drivers/block
parent     086fa5ff0854c676ec333760f4c0154b3b242616 (diff)
block: Consolidate phys_segment and hw_segment limits
Except for SCSI no device drivers distinguish between physical and hardware segment limits. Consolidate the two into a single segment limit.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
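The conversion repeated in every hunk below is mechanical: wherever a driver set both limits on its request queue, a single blk_queue_max_segments() call now expresses the device's scatter-gather capability. A minimal before/after sketch, using an illustrative request queue q and an invented FOO_MAX_SG limit (neither name comes from this patch):

/* Before: two calls, one per limit type, almost always given the same value. */
blk_queue_max_phys_segments(q, FOO_MAX_SG);
blk_queue_max_hw_segments(q, FOO_MAX_SG);

/* After: one consolidated segment limit. */
blk_queue_max_segments(q, FOO_MAX_SG);

Drivers that previously fell back to the MAX_PHYS_SEGMENTS/MAX_HW_SEGMENTS defaults now pass BLK_MAX_SEGMENTS instead, as the drbd hunk below shows.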
Diffstat (limited to 'drivers/block')
-rw-r--r--   drivers/block/DAC960.c         |  2
-rw-r--r--   drivers/block/cciss.c          |  5
-rw-r--r--   drivers/block/cpqarray.c       |  5
-rw-r--r--   drivers/block/drbd/drbd_nl.c   |  3
-rw-r--r--   drivers/block/paride/pf.c      |  3
-rw-r--r--   drivers/block/pktcdvd.c        |  4
-rw-r--r--   drivers/block/ps3disk.c        |  3
-rw-r--r--   drivers/block/ps3vram.c        |  3
-rw-r--r--   drivers/block/sunvdc.c         |  3
-rw-r--r--   drivers/block/sx8.c            |  3
-rw-r--r--   drivers/block/ub.c             |  3
-rw-r--r--   drivers/block/viodasd.c        |  3
-rw-r--r--   drivers/block/xen-blkfront.c   |  3
13 files changed, 14 insertions, 29 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 1c0cd35e1913..459f1bc25a7b 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,7 +2534,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
         blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
         RequestQueue->queuedata = Controller;
         blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-        blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+        blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
         blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
         disk->queue = RequestQueue;
         sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 030e52d72254..a29e69418a03 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1797,10 +1797,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
         blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
         /* This is a hardware imposed limit. */
-        blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
-
-        /* This is a limit in the driver and could be eliminated. */
-        blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
+        blk_queue_max_segments(disk->queue, h->maxsgentries);
 
         blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 6422651ec364..91d11631cec9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
         blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 
         /* This is a hardware imposed limit. */
-        blk_queue_max_hw_segments(q, SG_MAX);
+        blk_queue_max_segments(q, SG_MAX);
 
-        /* This is a driver limit and could be eliminated. */
-        blk_queue_max_phys_segments(q, SG_MAX);
-
         init_timer(&hba[i]->timer);
         hba[i]->timer.expires = jiffies + IDA_TIMER;
         hba[i]->timer.data = (unsigned long)hba[i];
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9b55e64196fc..4df3b40b1057 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -710,8 +710,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
         max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
         blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-        blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-        blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
         blk_queue_max_segment_size(q, max_seg_s);
         blk_queue_logical_block_size(q, 512);
         blk_queue_segment_boundary(q, PAGE_SIZE-1);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ea54ea393553..ddb4f9abd480 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -956,8 +956,7 @@ static int __init pf_init(void)
                 return -ENOMEM;
         }
 
-        blk_queue_max_phys_segments(pf_queue, cluster);
-        blk_queue_max_hw_segments(pf_queue, cluster);
+        blk_queue_max_segments(pf_queue, cluster);
 
         for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
                 struct gendisk *disk = pf->disk;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6e1daa24da2f..b72935b8f203 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -950,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
         if ((pd->settings.size << 9) / CD_FRAMESIZE
-            <= queue_max_phys_segments(q)) {
+            <= queue_max_segments(q)) {
                 /*
                  * The cdrom device can handle one segment/frame
                  */
                 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                 return 0;
         } else if ((pd->settings.size << 9) / PAGE_SIZE
-                   <= queue_max_phys_segments(q)) {
+                   <= queue_max_segments(q)) {
                 /*
                  * We can handle this case at the expense of some extra memory
                  * copies during write operations
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 9cd1a4a542b8..bc95469d33c1 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
         blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
                           ps3disk_prepare_flush);
 
-        blk_queue_max_phys_segments(queue, -1);
-        blk_queue_max_hw_segments(queue, -1);
+        blk_queue_max_segments(queue, -1);
         blk_queue_max_segment_size(queue, dev->bounce_size);
 
         gendisk = alloc_disk(PS3DISK_MINORS);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 6416b262934b..83ebb390b164 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -751,8 +751,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
         priv->queue = queue;
         queue->queuedata = dev;
         blk_queue_make_request(queue, ps3vram_make_request);
-        blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-        blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
+        blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
         blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
         blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
 
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index dd30cddd0f7f..48e8fee9f2d4 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -691,8 +691,7 @@ static int probe_disk(struct vdc_port *port)
 
         port->disk = g;
 
-        blk_queue_max_hw_segments(q, port->ring_cookies);
-        blk_queue_max_phys_segments(q, port->ring_cookies);
+        blk_queue_max_segments(q, port->ring_cookies);
         blk_queue_max_hw_sectors(q, port->max_xfer_size);
         g->major = vdc_major;
         g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 7bd7b2f83116..b70f0fca9a42 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
                         break;
                 }
                 disk->queue = q;
-                blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-                blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+                blk_queue_max_segments(q, CARM_MAX_REQ_SG);
                 blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
 
                 q->queuedata = port;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 352ea24d66e8..2e889838e819 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2320,8 +2320,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
         disk->queue = q;
 
         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-        blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-        blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+        blk_queue_max_segments(q, UB_MAX_REQ_SG);
         blk_queue_segment_boundary(q, 0xffffffff);  /* Dubious. */
         blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
         blk_queue_logical_block_size(q, lun->capacity.bsize);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index d44ece7d7b7c..c12b31362ac6 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -471,8 +471,7 @@ retry:
         }
 
         d->disk = g;
-        blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-        blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+        blk_queue_max_segments(q, VIOMAXBLOCKDMA);
         blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
         g->major = VIODASD_MAJOR;
         g->first_minor = dev_no << PARTITION_SHIFT;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f9861aaa1fef..9c09694b2520 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -353,8 +353,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
         blk_queue_max_segment_size(rq, PAGE_SIZE);
 
         /* Ensure a merged request will fit in a single I/O ring slot. */
-        blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-        blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+        blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
         /* Make sure buffer addresses are sector-aligned. */
         blk_queue_dma_alignment(rq, 511);