Diffstat (limited to 'drivers/scsi/ipr.c')
 drivers/scsi/ipr.c | 221 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 172 insertions(+), 49 deletions(-)
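
In brief: the patch gives each chip configuration a default blk-iopoll weight (64 for the SIS-64 chip, 0 elsewhere), exposes it through a new iopoll_weight sysfs attribute, folds the HRRQ completion loop into ipr_process_hrrq() with a budget argument so it can serve both the interrupt path (budget -1, i.e. unbounded) and the new ipr_iopoll() handler, and schedules blk-iopoll from ipr_isr_mhrrq() when the adapter runs with multiple MSI-X vectors. A usage sketch follows the diff.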
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f1eea5df54c6..17aea2d1ec7a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -108,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                 .max_cmds = 100,
                 .cache_line_size = 0x20,
                 .clear_isr = 1,
+                .iopoll_weight = 0,
                 {
                         .set_interrupt_mask_reg = 0x0022C,
                         .clr_interrupt_mask_reg = 0x00230,
@@ -132,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                 .max_cmds = 100,
                 .cache_line_size = 0x20,
                 .clear_isr = 1,
+                .iopoll_weight = 0,
                 {
                         .set_interrupt_mask_reg = 0x00288,
                         .clr_interrupt_mask_reg = 0x0028C,
@@ -156,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                 .max_cmds = 1000,
                 .cache_line_size = 0x20,
                 .clear_isr = 0,
+                .iopoll_weight = 64,
                 {
                         .set_interrupt_mask_reg = 0x00010,
                         .clr_interrupt_mask_reg = 0x00018,
@@ -3560,6 +3563,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
         .store = ipr_store_reset_adapter
 };
 
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+/**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev:        class device struct
+ * @attr:       device attribute (unused)
+ * @buf:        buffer
+ *
+ * Return value:
+ *        number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+        struct Scsi_Host *shost = class_to_shost(dev);
+        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+        unsigned long lock_flags = 0;
+        int len;
+
+        spin_lock_irqsave(shost->host_lock, lock_flags);
+        len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+        spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+        return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev:        class device struct
+ * @attr:       device attribute (unused)
+ * @buf:        buffer
+ * @count:      buffer size
+ *
+ * Return value:
+ *        number of bytes consumed from the buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+        struct Scsi_Host *shost = class_to_shost(dev);
+        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+        unsigned long user_iopoll_weight;
+        unsigned long lock_flags = 0;
+        int i;
+
+        if (!ioa_cfg->sis64) {
+                dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+                return -EINVAL;
+        }
+        if (kstrtoul(buf, 10, &user_iopoll_weight))
+                return -EINVAL;
+
+        if (user_iopoll_weight > 256) {
+                dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
+                return -EINVAL;
+        }
+
+        if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+                dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight is already set\n");
+                return strlen(buf);
+        }
+
+        if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+            ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+                for (i = 1; i < ioa_cfg->hrrq_num; i++)
+                        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+        }
+
+        spin_lock_irqsave(shost->host_lock, lock_flags);
+        ioa_cfg->iopoll_weight = user_iopoll_weight;
+        if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+            ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+                        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+                                        ioa_cfg->iopoll_weight, ipr_iopoll);
+                        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+                }
+        }
+        spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+        return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+        .attr = {
+                .name = "iopoll_weight",
+                .mode = S_IRUGO | S_IWUSR,
+        },
+        .show = ipr_show_iopoll_weight,
+        .store = ipr_store_iopoll_weight
+};
+
 /**
  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
  * @buf_len:        buffer length
@@ -3928,6 +4020,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
         &ipr_ioa_reset_attr,
         &ipr_update_fw_attr,
         &ipr_ioa_fw_type_attr,
+        &ipr_iopoll_weight_attr,
         NULL,
 };
 
@@ -5218,7 +5311,7 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 }
 
-static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
                               struct list_head *doneq)
 {
         u32 ioasc;
@@ -5260,9 +5353,41 @@ static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
                         hrr_queue->toggle_bit ^= 1u;
                 }
                 num_hrrq++;
+                if (budget > 0 && num_hrrq >= budget)
+                        break;
         }
+
         return num_hrrq;
 }
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+        struct ipr_ioa_cfg *ioa_cfg;
+        struct ipr_hrr_queue *hrrq;
+        struct ipr_cmnd *ipr_cmd, *temp;
+        unsigned long hrrq_flags;
+        int completed_ops;
+        LIST_HEAD(doneq);
+
+        hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+        ioa_cfg = hrrq->ioa_cfg;
+
+        spin_lock_irqsave(hrrq->lock, hrrq_flags);
+        completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+        if (completed_ops < budget)
+                blk_iopoll_complete(iop);
+        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+                list_del(&ipr_cmd->queue);
+                del_timer(&ipr_cmd->timer);
+                ipr_cmd->fast_done(ipr_cmd);
+        }
+
+        return completed_ops;
+}
+
 /**
  * ipr_isr - Interrupt service routine
  * @irq:        irq number
@@ -5277,8 +5402,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
         unsigned long hrrq_flags = 0;
         u32 int_reg = 0;
-        u32 ioasc;
-        u16 cmd_index;
         int num_hrrq = 0;
         int irq_none = 0;
         struct ipr_cmnd *ipr_cmd, *temp;
@@ -5293,60 +5416,30 @@ static irqreturn_t ipr_isr(int irq, void *devp)
         }
 
         while (1) {
-                ipr_cmd = NULL;
-
-                while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
-                       hrrq->toggle_bit) {
-
-                        cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
-                                     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
-                        if (unlikely(cmd_index > hrrq->max_cmd_id ||
-                                     cmd_index < hrrq->min_cmd_id)) {
-                                ipr_isr_eh(ioa_cfg,
-                                           "Invalid response handle from IOA: ",
-                                           cmd_index);
-                                rc = IRQ_HANDLED;
-                                goto unlock_out;
-                        }
-
-                        ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-                        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
-                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
-                        list_move_tail(&ipr_cmd->queue, &doneq);
-
-                        rc = IRQ_HANDLED;
-
-                        if (hrrq->hrrq_curr < hrrq->hrrq_end) {
-                                hrrq->hrrq_curr++;
-                        } else {
-                                hrrq->hrrq_curr = hrrq->hrrq_start;
-                                hrrq->toggle_bit ^= 1u;
-                        }
-                }
+                if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+                        rc = IRQ_HANDLED;
 
-                if (ipr_cmd && !ioa_cfg->clear_isr)
-                        break;
+                        if (!ioa_cfg->clear_isr)
+                                break;
 
-                if (ipr_cmd != NULL) {
                         /* Clear the PCI interrupt */
                         num_hrrq = 0;
                         do {
-                                writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+                                writel(IPR_PCII_HRRQ_UPDATED,
+                                       ioa_cfg->regs.clr_interrupt_reg32);
                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
                 } else if (rc == IRQ_NONE && irq_none == 0) {
                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                         irq_none++;
                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
                            int_reg & IPR_PCII_HRRQ_UPDATED) {
-                        ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
+                        ipr_isr_eh(ioa_cfg,
+                                   "Error clearing HRRQ: ", num_hrrq);
                         rc = IRQ_HANDLED;
-                        goto unlock_out;
+                        break;
                 } else
                         break;
         }
@@ -5354,7 +5447,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
         if (unlikely(rc == IRQ_NONE))
                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
-unlock_out:
         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
                 list_del(&ipr_cmd->queue);
@@ -5375,6 +5467,7 @@ unlock_out:
 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 {
         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+        struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
         unsigned long hrrq_flags = 0;
         struct ipr_cmnd *ipr_cmd, *temp;
         irqreturn_t rc = IRQ_NONE;
@@ -5388,11 +5481,22 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
                 return IRQ_NONE;
         }
 
-        if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
-            hrrq->toggle_bit)
+        if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+            ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+                    hrrq->toggle_bit) {
+                        if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+                                blk_iopoll_sched(&hrrq->iopoll);
+                        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+                        return IRQ_HANDLED;
+                }
+        } else {
+                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+                    hrrq->toggle_bit)
 
-        if (__ipr_process_hrrq(hrrq, &doneq))
-                rc = IRQ_HANDLED;
+                        if (ipr_process_hrrq(hrrq, -1, &doneq))
+                                rc = IRQ_HANDLED;
+        }
 
         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
@@ -9690,7 +9794,7 @@ static void ipr_remove(struct pci_dev *pdev)
 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 {
         struct ipr_ioa_cfg *ioa_cfg;
-        int rc;
+        int rc, i;
 
         rc = ipr_probe_ioa(pdev, dev_id);
 
@@ -9737,6 +9841,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
         scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
         ioa_cfg->allow_ml_add_del = 1;
         ioa_cfg->host->max_channel = IPR_VSET_BUS;
+        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+        if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+            ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+                        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+                                        ioa_cfg->iopoll_weight, ipr_iopoll);
+                        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+                }
+        }
+
         schedule_work(&ioa_cfg->work_q);
         return 0;
 }
@@ -9755,8 +9870,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
 {
         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
         unsigned long lock_flags = 0;
+        int i;
 
         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+        if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+            ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+                ioa_cfg->iopoll_weight = 0;
+                for (i = 1; i < ioa_cfg->hrrq_num; i++)
+                        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+        }
+
         while (ioa_cfg->in_reset_reload) {
                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
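
Usage sketch (not part of the patch; host0 is a hypothetical host number): once applied, the polling weight appears as a sysfs attribute on the SCSI host and can be read and changed at runtime, provided blk-iopoll is enabled in the kernel:

    # cat /sys/class/scsi_host/host0/iopoll_weight
    64
    # echo 32 > /sys/class/scsi_host/host0/iopoll_weight

Writing 0 turns polling off again; values above 256 are rejected, and the store handler refuses adapters that are not SIS-64. The value 64 shown here is the default only for the SIS-64 chip configuration above; the other chips default to 0.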