author     Don Brace <brace@beardog.cce.hp.com>    2009-11-12 13:50:01 -0500
committer  Jens Axboe <jens.axboe@oracle.com>      2009-11-13 02:45:54 -0500
commit     5c07a311a80adb0138fc08e8279c60255d88d0b8
tree       1746b4ec4b7ea1f2f02b34d5c0adc86dc8e0d97f  /drivers/block/cciss.c
parent     da0021841c3ea6a82588efae3260015847ea5d33
cciss: Add enhanced scatter-gather support

For controllers which support it, more than 512 scatter-gather elements
per command may be used, and the max transfer size can be increased to
8192 blocks.

Signed-off-by: Don Brace <brace@beardog.cce.hp.com>
Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
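The mechanism behind the larger limits is scatter-gather chaining: each
command still embeds only a small, fixed descriptor array, but its last
slot can be flagged (CCISS_SG_CHAIN) as a pointer to an external block
holding the remaining descriptors. Below is a minimal, self-contained
sketch of that layout; sg_desc, EMBEDDED_SG, SG_CHAIN_FLAG and build_sg
are illustrative stand-ins, not the driver's real types or values.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EMBEDDED_SG   32            /* like max_cmd_sgentries below */
#define SG_CHAIN_FLAG 0x80000000u   /* stand-in for CCISS_SG_CHAIN */

struct sg_desc {
	uint64_t addr;              /* bus address of data (or chain block) */
	uint32_t len;               /* length in bytes */
	uint32_t ext;               /* 0 = data, SG_CHAIN_FLAG = chain */
};

/*
 * Fill 'total' fake entries; when the embedded array is about to run out
 * and more than one entry remains, turn the last embedded slot into a
 * pointer to an external chain block holding the remainder.
 */
static struct sg_desc *build_sg(struct sg_desc *embedded, int total)
{
	struct sg_desc *chain = NULL, *cur = embedded;
	int slot = 0;

	for (int i = 0; i < total; i++) {
		if (slot + 1 == EMBEDDED_SG && !chain && total - i > 1) {
			int remaining = total - i;

			chain = calloc(remaining, sizeof(*chain));
			if (!chain)
				return NULL;    /* allocation failed */
			cur[slot].addr = (uint64_t)(uintptr_t)chain;
			cur[slot].len = remaining * sizeof(*chain);
			cur[slot].ext = SG_CHAIN_FLAG;
			cur = chain;
			slot = 0;
		}
		cur[slot].addr = 0x1000ull * i;   /* fake data address */
		cur[slot].len = 4096;
		cur[slot].ext = 0;
		slot++;
	}
	return chain;
}

int main(void)
{
	struct sg_desc embedded[EMBEDDED_SG] = { 0 };
	struct sg_desc *chain = build_sg(embedded, 100);

	/* slot 31 now points at a 69-entry chain block (100 - 31) */
	printf("chain entry? %s, chain len %u bytes\n",
	       embedded[EMBEDDED_SG - 1].ext == SG_CHAIN_FLAG ? "yes" : "no",
	       embedded[EMBEDDED_SG - 1].len);
	free(chain);
	return 0;
}

The diff below implements exactly this shape: do_cciss_request() builds
the chain on the submission side, cciss_softirq_done() walks and unmaps
it on completion, and cciss_pci_init() sizes everything from the
controller's config table.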
Diffstat (limited to 'drivers/block/cciss.c')
 drivers/block/cciss.c | 183 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 165 insertions(+), 18 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index bf2d1c80b788..1bd313dcf6af 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1655,9 +1655,11 @@ static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
 	ctlr_info_t *h = hba[cmd->ctlr];
+	SGDescriptor_struct *curr_sg = cmd->SG;
 	unsigned long flags;
 	u64bit temp64;
 	int i, ddir;
+	int sg_index = 0;
 
 	if (cmd->Request.Type.Direction == XFER_READ)
 		ddir = PCI_DMA_FROMDEVICE;
@@ -1667,9 +1669,22 @@ static void cciss_softirq_done(struct request *rq)
 	/* command did not need to be retried */
 	/* unmap the DMA mapping for all the scatter gather elements */
 	for (i = 0; i < cmd->Header.SGList; i++) {
-		temp64.val32.lower = cmd->SG[i].Addr.lower;
-		temp64.val32.upper = cmd->SG[i].Addr.upper;
-		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
+		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
+			temp64.val32.lower = cmd->SG[i].Addr.lower;
+			temp64.val32.upper = cmd->SG[i].Addr.upper;
+			pci_dma_sync_single_for_cpu(h->pdev, temp64.val,
+				cmd->SG[i].Len, ddir);
+			pci_unmap_single(h->pdev, temp64.val,
+				cmd->SG[i].Len, ddir);
+			/* Point to the next block */
+			curr_sg = h->cmd_sg_list[cmd->cmdindex]->sgchain;
+			sg_index = 0;
+		}
+		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
+		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
+		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
+				ddir);
+		++sg_index;
 	}
 
 #ifdef CCISS_DEBUG
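This completion-side walk mirrors the chained layout: entries are
unmapped from the embedded list until a CCISS_SG_CHAIN descriptor is
hit, at which point the chain block itself is synced and unmapped (it
was mapped with pci_map_single() on submission) and the cursor moves
into the chain block. A hedged, self-contained sketch of the traversal,
again using stand-in types (sg_desc, walk_sg) rather than driver code:

#include <stdint.h>
#include <stdio.h>

#define SG_CHAIN_FLAG 0x80000000u   /* stand-in for CCISS_SG_CHAIN */

struct sg_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t ext;
};

/* Visit 'count' data descriptors, hopping into the chain block when the
 * cursor reaches a chain entry (where the driver unmaps the chain). */
static void walk_sg(const struct sg_desc *embedded, int count)
{
	const struct sg_desc *cur = embedded;
	int slot = 0;

	for (int i = 0; i < count; i++) {
		if (cur[slot].ext == SG_CHAIN_FLAG) {
			/* driver: pci_dma_sync_single_for_cpu() +
			 * pci_unmap_single() of the chain block here */
			cur = (const struct sg_desc *)(uintptr_t)cur[slot].addr;
			slot = 0;
		}
		/* driver: pci_unmap_page() of this data entry here */
		printf("entry %d: addr=%#llx len=%u\n", i,
		       (unsigned long long)cur[slot].addr, cur[slot].len);
		slot++;
	}
}

int main(void)
{
	struct sg_desc chain[2] = {
		{ 0x2000, 512, 0 },
		{ 0x3000, 512, 0 },
	};
	struct sg_desc embedded[2] = {
		{ 0x1000, 512, 0 },
		{ (uint64_t)(uintptr_t)chain, sizeof(chain), SG_CHAIN_FLAG },
	};

	walk_sg(embedded, 3);	/* one embedded data entry, two chained */
	return 0;
}

Note that the driver's loop bound is cmd->Header.SGList; how that value
interacts with the cap set at submission time is easiest to see in the
do_cciss_request() hunk below.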
@@ -1781,10 +1796,10 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
 
 	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
+	blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
 
 	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
+	blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
 
 	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
 
@@ -3063,9 +3078,13 @@ static void do_cciss_request(struct request_queue *q)
 	int seg;
 	struct request *creq;
 	u64bit temp64;
-	struct scatterlist tmp_sg[MAXSGENTRIES];
+	struct scatterlist *tmp_sg;
+	SGDescriptor_struct *curr_sg;
 	drive_info_struct *drv;
 	int i, dir;
+	int nseg = 0;
+	int sg_index = 0;
+	int chained = 0;
 
 	/* We call start_io here in case there is a command waiting on the
 	 * queue that has not been sent.
@@ -3078,13 +3097,14 @@ static void do_cciss_request(struct request_queue *q)
 	if (!creq)
 		goto startio;
 
-	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
+	BUG_ON(creq->nr_phys_segments > h->maxsgentries);
 
 	if ((c = cmd_alloc(h, 1)) == NULL)
 		goto full;
 
 	blk_start_request(creq);
 
+	tmp_sg = h->scatter_list[c->cmdindex];
 	spin_unlock_irq(q->queue_lock);
 
 	c->cmd_type = CMD_RWREQ;
@@ -3113,7 +3133,7 @@ static void do_cciss_request(struct request_queue *q)
 	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif /* CCISS_DEBUG */
 
-	sg_init_table(tmp_sg, MAXSGENTRIES);
+	sg_init_table(tmp_sg, h->maxsgentries);
 	seg = blk_rq_map_sg(q, creq, tmp_sg);
 
 	/* get the DMA records for the setup */
@@ -3122,25 +3142,70 @@ static void do_cciss_request(struct request_queue *q)
 	else
 		dir = PCI_DMA_TODEVICE;
 
+	curr_sg = c->SG;
+	sg_index = 0;
+	chained = 0;
+
 	for (i = 0; i < seg; i++) {
-		c->SG[i].Len = tmp_sg[i].length;
+		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
+				!chained && ((seg - i) > 1)) {
+			nseg = seg - i;
+			curr_sg[sg_index].Len = (nseg) *
+					sizeof(SGDescriptor_struct);
+			curr_sg[sg_index].Ext = CCISS_SG_CHAIN;
+
+			/* Point to next chain block. */
+			curr_sg = h->cmd_sg_list[c->cmdindex]->sgchain;
+			sg_index = 0;
+			chained = 1;
+		}
+		curr_sg[sg_index].Len = tmp_sg[i].length;
 		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
-						  tmp_sg[i].offset,
-						  tmp_sg[i].length, dir);
-		c->SG[i].Addr.lower = temp64.val32.lower;
-		c->SG[i].Addr.upper = temp64.val32.upper;
-		c->SG[i].Ext = 0;	// we are not chaining
+						tmp_sg[i].offset,
+						tmp_sg[i].length, dir);
+		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
+		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
+		curr_sg[sg_index].Ext = 0;	/* we are not chaining */
+
+		++sg_index;
 	}
+
+	if (chained) {
+		int len;
+		curr_sg = c->SG;
+		sg_index = h->max_cmd_sgentries - 1;
+		len = curr_sg[sg_index].Len;
+		/* Setup pointer to next chain block.
+		 * Fill out last element in current chain
+		 * block with address of next chain block.
+		 */
+		temp64.val = pci_map_single(h->pdev,
+				h->cmd_sg_list[c->cmdindex]->sgchain,
+				len, dir);
+
+		h->cmd_sg_list[c->cmdindex]->sg_chain_dma = temp64.val;
+		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
+		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
+
+		pci_dma_sync_single_for_device(h->pdev,
+				h->cmd_sg_list[c->cmdindex]->sg_chain_dma,
+				len, dir);
+	}
+
 	/* track how many SG entries we are using */
 	if (seg > h->maxSG)
 		h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
-	       blk_rq_sectors(creq), seg);
+	printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments "
+			"chained[%d]\n",
+			blk_rq_sectors(creq), seg, chained);
 #endif /* CCISS_DEBUG */
 
-	c->Header.SGList = c->Header.SGTotal = seg;
+	c->Header.SGList = c->Header.SGTotal = seg + chained;
+	if (seg > h->max_cmd_sgentries)
+		c->Header.SGList = h->max_cmd_sgentries;
+
 	if (likely(blk_fs_request(creq))) {
 		if(h->cciss_read == CCISS_READ_10) {
 			c->Request.CDB[1] = 0;
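The header bookkeeping at the end of this hunk is the subtle part:
SGTotal counts every descriptor consumed, including the chain
descriptor, while SGList is capped at the embedded array size. A small
worked check of that arithmetic, as a hypothetical standalone helper
(sg_header is not a driver function; it assumes max_cmd_sgentries = 32,
the value configured in cciss_pci_init() below):

#include <stdio.h>

/* Mirror of the header arithmetic above; illustrative, not driver code. */
static void sg_header(int seg, int max_cmd_sgentries)
{
	/* a chain entry is consumed only when seg exceeds the embedded array */
	int chained = seg > max_cmd_sgentries;
	int sgtotal = seg + chained;	/* data entries + optional chain entry */
	int sglist = chained ? max_cmd_sgentries : seg;

	printf("seg=%3d -> SGList=%2d SGTotal=%3d chained=%d\n",
	       seg, sglist, sgtotal, chained);
}

int main(void)
{
	sg_header(10, 32);	/* fits embedded: SGList = SGTotal = 10 */
	sg_header(32, 32);	/* exactly fits: still no chain          */
	sg_header(100, 32);	/* chained: SGList = 32, SGTotal = 101   */
	return 0;
}

So a 100-segment request reports 32 embedded entries (31 data plus the
chain descriptor) and 101 descriptors overall.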
@@ -3713,6 +3778,23 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	 * leave a little room for ioctl calls.
 	 */
 	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+	c->maxsgentries = readl(&(c->cfgtable->MaxSGElements));
+
+	/*
+	 * Limit native commands to 32 s/g elements to save dma'able memory.
+	 * However, spec says if 0, use 31
+	 */
+
+	c->max_cmd_sgentries = 31;
+	if (c->maxsgentries > 512) {
+		c->max_cmd_sgentries = 32;
+		c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1;
+		c->maxsgentries -= 1;	/* account for chain pointer */
+	} else {
+		c->maxsgentries = 31;	/* Default to traditional value */
+		c->chainsize = 0;	/* traditional */
+	}
+
 	c->product_name = products[prod_index].product_name;
 	c->access = *(products[prod_index].access);
 	c->nr_cmds = c->max_commands - 4;
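Concretely, a controller reporting MaxSGElements = 528 gets
max_cmd_sgentries = 32, chainsize = 528 - 32 + 1 = 497, and advertises
maxsgentries = 527 to the block layer (one element is given up for the
chain pointer); anything reporting 512 or fewer, including 0, falls back
to the traditional 31 with chaining disabled. The same arithmetic as a
standalone check (size_sg and sg_limits are hypothetical names, not
driver code):

#include <stdio.h>

/* Illustrative mirror of the cfgtable sizing above. */
struct sg_limits { int maxsg, max_cmd_sg, chainsize; };

static struct sg_limits size_sg(int max_sg_elements)	/* from cfgtable */
{
	struct sg_limits l = { .max_cmd_sg = 31 };

	if (max_sg_elements > 512) {
		l.max_cmd_sg = 32;
		l.chainsize = max_sg_elements - l.max_cmd_sg + 1;
		l.maxsg = max_sg_elements - 1;	/* minus chain pointer */
	} else {
		l.maxsg = 31;		/* traditional value */
		l.chainsize = 0;	/* no chaining */
	}
	return l;
}

int main(void)
{
	struct sg_limits a = size_sg(528), b = size_sg(0);

	printf("528 -> maxsg=%d max_cmd_sg=%d chainsize=%d\n",
	       a.maxsg, a.max_cmd_sg, a.chainsize);	/* 527 32 497 */
	printf("  0 -> maxsg=%d max_cmd_sg=%d chainsize=%d\n",
	       b.maxsg, b.max_cmd_sg, b.chainsize);	/* 31 31 0 */
	return 0;
}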
@@ -4039,6 +4121,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 {
 	int i;
 	int j = 0;
+	int k = 0;
 	int rc;
 	int dac, return_code;
 	InquiryData_struct *inq_buff;
@@ -4142,6 +4225,53 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 		printk(KERN_ERR "cciss: out of memory");
 		goto clean4;
 	}
+
+	/* Need space for temp scatter list */
+	hba[i]->scatter_list = kmalloc(hba[i]->max_commands *
+						sizeof(struct scatterlist *),
+						GFP_KERNEL);
+	for (k = 0; k < hba[i]->nr_cmds; k++) {
+		hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
+							hba[i]->maxsgentries,
+							GFP_KERNEL);
+		if (hba[i]->scatter_list[k] == NULL) {
+			printk(KERN_ERR "cciss%d: could not allocate "
+				"s/g lists\n", i);
+			goto clean4;
+		}
+	}
+	hba[i]->cmd_sg_list = kmalloc(sizeof(struct Cmd_sg_list *) *
+						hba[i]->nr_cmds,
+						GFP_KERNEL);
+	if (!hba[i]->cmd_sg_list) {
+		printk(KERN_ERR "cciss%d: Cannot get memory for "
+				"s/g chaining.\n", i);
+		goto clean4;
+	}
+	/* Build up chain blocks for each command */
+	if (hba[i]->chainsize > 0) {
+		for (j = 0; j < hba[i]->nr_cmds; j++) {
+			hba[i]->cmd_sg_list[j] =
+				kmalloc(sizeof(struct Cmd_sg_list),
+					GFP_KERNEL);
+			if (!hba[i]->cmd_sg_list[j]) {
+				printk(KERN_ERR "cciss%d: Cannot get memory "
+					"for chain block.\n", i);
+				goto clean4;
+			}
+			/* Need a block of chainsized s/g elements. */
+			hba[i]->cmd_sg_list[j]->sgchain =
+				kmalloc((hba[i]->chainsize *
+					sizeof(SGDescriptor_struct)),
+					GFP_KERNEL);
+			if (!hba[i]->cmd_sg_list[j]->sgchain) {
+				printk(KERN_ERR "cciss%d: Cannot get memory "
+					"for s/g chains\n", i);
+				goto clean4;
+			}
+		}
+	}
+
 	spin_lock_init(&hba[i]->lock);
 
 	/* Initialize the pdev driver private data.
@@ -4187,7 +4317,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 	cciss_procinit(i);
 
-	hba[i]->cciss_max_sectors = 2048;
+	hba[i]->cciss_max_sectors = 8192;
 
 	rebuild_lun_table(hba[i], 1, 0);
 	hba[i]->busy_initializing = 0;
@@ -4195,6 +4325,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
 clean4:
 	kfree(hba[i]->cmd_pool_bits);
+	/* Free up sg elements */
+	for (k = 0; k < hba[i]->nr_cmds; k++)
+		kfree(hba[i]->scatter_list[k]);
+	kfree(hba[i]->scatter_list);
+	for (j = 0; j < hba[i]->nr_cmds; j++) {
+		if (hba[i]->cmd_sg_list[j])
+			kfree(hba[i]->cmd_sg_list[j]->sgchain);
+		kfree(hba[i]->cmd_sg_list[j]);
+	}
 	if (hba[i]->cmd_pool)
 		pci_free_consistent(hba[i]->pdev,
 			hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -4308,6 +4447,14 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 		hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
 	kfree(hba[i]->cmd_pool_bits);
+	/* Free up sg elements */
+	for (j = 0; j < hba[i]->nr_cmds; j++)
+		kfree(hba[i]->scatter_list[j]);
+	kfree(hba[i]->scatter_list);
+	for (j = 0; j < hba[i]->nr_cmds; j++) {
+		kfree(hba[i]->cmd_sg_list[j]->sgchain);
+		kfree(hba[i]->cmd_sg_list[j]);
+	}
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
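One asymmetry between the two cleanup paths is visible above: the clean4
error path guards the per-command dereference with
if (hba[i]->cmd_sg_list[j]), while cciss_remove_one() frees
cmd_sg_list[j]->sgchain unconditionally, even though the per-command
entries are only populated when chainsize > 0 and the pointer array
itself comes from a plain kmalloc(). The unguarded loop therefore
appears to rely on chaining being configured. A defensive,
self-contained sketch of a guarded teardown for this pattern (teardown
and cmd_sg_block are illustrative names; calloc() stands in for a
zeroing allocation):

#include <stdlib.h>

/* 'cmd_sg' stands in for hba[i]->cmd_sg_list (illustrative, not driver
 * code); unpopulated slots must be NULL for the guards to be meaningful. */
struct cmd_sg_block { void *sgchain; };

static void teardown(struct cmd_sg_block **cmd_sg, int nr_cmds)
{
	if (!cmd_sg)
		return;
	for (int j = 0; j < nr_cmds; j++) {
		if (!cmd_sg[j])
			continue;	/* never populated (chainsize == 0) */
		free(cmd_sg[j]->sgchain);
		free(cmd_sg[j]);
	}
	free(cmd_sg);
}

int main(void)
{
	int nr = 4;
	/* calloc keeps unpopulated slots NULL, unlike a bare kmalloc() */
	struct cmd_sg_block **cmd_sg = calloc(nr, sizeof(*cmd_sg));

	cmd_sg[0] = calloc(1, sizeof(*cmd_sg[0]));
	cmd_sg[0]->sgchain = malloc(64);
	teardown(cmd_sg, nr);	/* frees slot 0, skips the NULL slots */
	return 0;
}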