author    Wayne Boyer <wayneb@linux.vnet.ibm.com>  2009-06-16 18:13:28 -0400
committer James Bottomley <James.Bottomley@HansenPartnership.com>  2009-06-21 11:52:46 -0400
commit    95fecd90397ec1f85eb31ede955d846a86d2077b
tree      979cfdf24378c63203e3a0430d93f7da6f03db82 /drivers/scsi/ipr.c
parent    a9e0edb687151617fe89cc5ab0086ebfc73ffccb
ipr: add test for MSI interrupt support
The return value from pci_enable_msi() cannot always be trusted. This patch
adds code to generate an interrupt after MSI has been enabled and tests
whether or not we can receive and process it. If the test fails, fall back
to LSI.

Signed-off-by: Wayne Boyer <wayneb@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
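For orientation before reading the hunks, here is a condensed sketch of the probe-time flow this patch introduces. It is a paraphrase of the ipr_probe_ioa() and request_irq() changes in the diff below, not a verbatim excerpt; the error labels and the ipr-specific register writes that actually raise the test interrupt are elided.

	/* Condensed sketch of the flow added by this patch (details in the diff below). */
	if (!(rc = pci_enable_msi(pdev))) {
		/*
		 * A successful pci_enable_msi() is not proof that MSI delivery
		 * works, so raise a test interrupt and wait for ipr_test_intr()
		 * to confirm it arrived.
		 */
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);		/* no MSI received: fall back to LSI */
		else if (rc)
			goto out_msi_disable;		/* hard failure, e.g. request_irq() */
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	}

	/* ... later in ipr_probe_ioa() ... */

	/* MSI vectors are not shared, so IRQF_SHARED is only needed for LSI. */
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

ipr_test_msi() itself registers ipr_test_intr() as a temporary handler, writes IPR_PCII_IO_DEBUG_ACKNOWLEDGE to the sense interrupt register, and gives the interrupt up to one second (wait_event_timeout(..., HZ)) to arrive before deciding.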
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c  108
1 file changed, 101 insertions(+), 7 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0f8bc772b112..15ce8e51d5de 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7367,6 +7367,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
+	init_waitqueue_head(&ioa_cfg->msi_wait_q);
 	ioa_cfg->sdt_state = INACTIVE;
 	if (ipr_enable_cache)
 		ioa_cfg->cache_state = CACHE_ENABLED;
@@ -7417,6 +7418,89 @@ ipr_get_chip_cfg(const struct pci_device_id *dev_id)
 }
 
 /**
+ * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
+ * @devp:	pointer to the ioa config struct
+ *
+ * Description: Simply set the msi_received flag to 1 indicating that
+ * Message Signaled Interrupts are supported.
+ *
+ * Return value:
+ * 	IRQ_HANDLED
+ **/
+static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+{
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+	unsigned long lock_flags = 0;
+	irqreturn_t rc = IRQ_HANDLED;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+	ioa_cfg->msi_received = 1;
+	wake_up(&ioa_cfg->msi_wait_q);
+
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return rc;
+}
+
+/**
+ * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @pdev:	PCI device struct
+ *
+ * Description: The return value from pci_enable_msi() can not always be
+ * trusted.  This routine sets up and initiates a test interrupt to determine
+ * if the interrupt is received via the ipr_test_intr() service routine.
+ * If the test fails, the driver will fall back to LSI.
+ *
+ * Return value:
+ * 	0 on success / non-zero on failure
+ **/
+static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
+				  struct pci_dev *pdev)
+{
+	int rc;
+	volatile u32 int_reg;
+	unsigned long lock_flags = 0;
+
+	ENTER;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	init_waitqueue_head(&ioa_cfg->msi_wait_q);
+	ioa_cfg->msi_received = 0;
+	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+	if (rc) {
+		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+		return rc;
+	} else if (ipr_debug)
+		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+
+	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (!ioa_cfg->msi_received) {
+		/* MSI test failed */
+		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
+		rc = -EOPNOTSUPP;
+	} else if (ipr_debug)
+		dev_info(&pdev->dev, "MSI test succeeded.\n");
+
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+	free_irq(pdev->irq, ioa_cfg);
+
+	LEAVE;
+
+	return rc;
+}
+
+/**
  * ipr_probe_ioa - Allocates memory and does first stage of initialization
  * @pdev:		PCI device struct
  * @dev_id:		PCI device id struct
@@ -7441,11 +7525,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out;
 	}
 
-	if (!(rc = pci_enable_msi(pdev)))
-		dev_info(&pdev->dev, "MSI enabled\n");
-	else if (ipr_debug)
-		dev_info(&pdev->dev, "Cannot enable MSI\n");
-
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
 
 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7519,6 +7598,18 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto cleanup_nomem;
 	}
 
+	/* Enable MSI style interrupts if they are supported. */
+	if (!(rc = pci_enable_msi(pdev))) {
+		rc = ipr_test_msi(ioa_cfg, pdev);
+		if (rc == -EOPNOTSUPP)
+			pci_disable_msi(pdev);
+		else if (rc)
+			goto out_msi_disable;
+		else
+			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
+	} else if (ipr_debug)
+		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+
 	/* Save away PCI config space for use following IOA reset */
 	rc = pci_save_state(pdev);
 
@@ -7556,7 +7647,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		ioa_cfg->ioa_unit_checked = 1;
 
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
-	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
+	rc = request_irq(pdev->irq, ipr_isr,
+			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
+			 IPR_NAME, ioa_cfg);
 
 	if (rc) {
 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
@@ -7583,12 +7676,13 @@ cleanup_nolog:
 	ipr_free_mem(ioa_cfg);
 cleanup_nomem:
 	iounmap(ipr_regs);
+out_msi_disable:
+	pci_disable_msi(pdev);
 out_release_regions:
 	pci_release_regions(pdev);
 out_scsi_host_put:
 	scsi_host_put(host);
 out_disable:
-	pci_disable_msi(pdev);
 	pci_disable_device(pdev);
 	goto out;
 }