about summary refs log tree commit diff stats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorBrian King <brking@linux.vnet.ibm.com>2014-01-21 13:16:41 -0500
committerJames Bottomley <JBottomley@Parallels.com>2014-03-19 18:04:39 -0400
commit6270e5932a01d6536dbce27782e8adf2180598d8 (patch)
treebd5bde920c0a703b5798783357c7675e02ca31c0 /drivers/scsi
parentf94d9964626fd75b22878cc97ffebf287415f64e (diff)
[SCSI] ipr: Handle early EEH
If, when the ipr driver loads, the adapter is in an EEH error state, it will currently oops and not be able to recover, as it attempts to access memory that has not yet been allocated. We've seen this occur in some kexec scenarios. The following patch fixes the oops and also allows the driver to recover from these probe time EEH errors. [jejb: checkpatch fix] Signed-off-by: Wen Xiong <wenxiong@linux.vnet.ibm.com> Signed-off-by: Brian King <brking@linux.vnet.ibm.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/ipr.c265
-rw-r--r--drivers/scsi/ipr.h3
2 files changed, 179 insertions(+), 89 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dfaa47f94fbc..25dc6e594f71 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8645,6 +8645,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8645} 8645}
8646 8646
8647/** 8647/**
8648 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8649 * @pdev: PCI device struct
8650 *
8651 * Description: This routine is called to tell us that the MMIO
8652 * access to the IOA has been restored
8653 */
8654static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8655{
8656 unsigned long flags = 0;
8657 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8658
8659 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8660 if (!ioa_cfg->probe_done)
8661 pci_save_state(pdev);
8662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8663 return PCI_ERS_RESULT_NEED_RESET;
8664}
8665
8666/**
8648 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 8667 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8649 * @pdev: PCI device struct 8668 * @pdev: PCI device struct
8650 * 8669 *
@@ -8658,7 +8677,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev)
8658 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8677 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8659 8678
8660 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8679 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8661 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 8680 if (ioa_cfg->probe_done)
8681 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8682 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8663} 8683}
8664 8684
@@ -8676,11 +8696,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8676 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8696 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8677 8697
8678 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8698 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8679 if (ioa_cfg->needs_warm_reset) 8699 if (ioa_cfg->probe_done) {
8680 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8700 if (ioa_cfg->needs_warm_reset)
8681 else 8701 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8682 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 8702 else
8683 IPR_SHUTDOWN_NONE); 8703 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8704 IPR_SHUTDOWN_NONE);
8705 } else
8706 wake_up_all(&ioa_cfg->eeh_wait_q);
8684 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8685 return PCI_ERS_RESULT_RECOVERED; 8708 return PCI_ERS_RESULT_RECOVERED;
8686} 8709}
@@ -8699,17 +8722,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
8699 int i; 8722 int i;
8700 8723
8701 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8724 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8702 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 8725 if (ioa_cfg->probe_done) {
8703 ioa_cfg->sdt_state = ABORT_DUMP; 8726 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8704 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 8727 ioa_cfg->sdt_state = ABORT_DUMP;
8705 ioa_cfg->in_ioa_bringdown = 1; 8728 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8706 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8729 ioa_cfg->in_ioa_bringdown = 1;
8707 spin_lock(&ioa_cfg->hrrq[i]._lock); 8730 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8708 ioa_cfg->hrrq[i].allow_cmds = 0; 8731 spin_lock(&ioa_cfg->hrrq[i]._lock);
8709 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8732 ioa_cfg->hrrq[i].allow_cmds = 0;
8710 } 8733 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8711 wmb(); 8734 }
8712 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8735 wmb();
8736 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8737 } else
8738 wake_up_all(&ioa_cfg->eeh_wait_q);
8713 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8739 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8714} 8740}
8715 8741
@@ -8729,7 +8755,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8729 switch (state) { 8755 switch (state) {
8730 case pci_channel_io_frozen: 8756 case pci_channel_io_frozen:
8731 ipr_pci_frozen(pdev); 8757 ipr_pci_frozen(pdev);
8732 return PCI_ERS_RESULT_NEED_RESET; 8758 return PCI_ERS_RESULT_CAN_RECOVER;
8733 case pci_channel_io_perm_failure: 8759 case pci_channel_io_perm_failure:
8734 ipr_pci_perm_failure(pdev); 8760 ipr_pci_perm_failure(pdev);
8735 return PCI_ERS_RESULT_DISCONNECT; 8761 return PCI_ERS_RESULT_DISCONNECT;
@@ -8759,6 +8785,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8759 ENTER; 8785 ENTER;
8760 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 8786 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8761 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); 8787 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8788 ioa_cfg->probe_done = 1;
8762 if (ioa_cfg->needs_hard_reset) { 8789 if (ioa_cfg->needs_hard_reset) {
8763 ioa_cfg->needs_hard_reset = 0; 8790 ioa_cfg->needs_hard_reset = 0;
8764 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8791 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9034,16 +9061,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9034 if (!ioa_cfg->vpd_cbs) 9061 if (!ioa_cfg->vpd_cbs)
9035 goto out_free_res_entries; 9062 goto out_free_res_entries;
9036 9063
9037 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9038 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9039 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9040 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9041 if (i == 0)
9042 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9043 else
9044 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9045 }
9046
9047 if (ipr_alloc_cmd_blks(ioa_cfg)) 9064 if (ipr_alloc_cmd_blks(ioa_cfg))
9048 goto out_free_vpd_cbs; 9065 goto out_free_vpd_cbs;
9049 9066
@@ -9144,6 +9161,48 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9144} 9161}
9145 9162
9146/** 9163/**
9164 * ipr_init_regs - Initialize IOA registers
9165 * @ioa_cfg: ioa config struct
9166 *
9167 * Return value:
9168 * none
9169 **/
9170static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9171{
9172 const struct ipr_interrupt_offsets *p;
9173 struct ipr_interrupts *t;
9174 void __iomem *base;
9175
9176 p = &ioa_cfg->chip_cfg->regs;
9177 t = &ioa_cfg->regs;
9178 base = ioa_cfg->hdw_dma_regs;
9179
9180 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9181 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9182 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9183 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9184 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9185 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9186 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9187 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9188 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9189 t->ioarrin_reg = base + p->ioarrin_reg;
9190 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9191 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9192 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9193 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9194 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9195 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9196
9197 if (ioa_cfg->sis64) {
9198 t->init_feedback_reg = base + p->init_feedback_reg;
9199 t->dump_addr_reg = base + p->dump_addr_reg;
9200 t->dump_data_reg = base + p->dump_data_reg;
9201 t->endian_swap_reg = base + p->endian_swap_reg;
9202 }
9203}
9204
9205/**
9147 * ipr_init_ioa_cfg - Initialize IOA config struct 9206 * ipr_init_ioa_cfg - Initialize IOA config struct
9148 * @ioa_cfg: ioa config struct 9207 * @ioa_cfg: ioa config struct
9149 * @host: scsi host struct 9208 * @host: scsi host struct
@@ -9155,9 +9214,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9155static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, 9214static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9156 struct Scsi_Host *host, struct pci_dev *pdev) 9215 struct Scsi_Host *host, struct pci_dev *pdev)
9157{ 9216{
9158 const struct ipr_interrupt_offsets *p; 9217 int i;
9159 struct ipr_interrupts *t;
9160 void __iomem *base;
9161 9218
9162 ioa_cfg->host = host; 9219 ioa_cfg->host = host;
9163 ioa_cfg->pdev = pdev; 9220 ioa_cfg->pdev = pdev;
@@ -9177,6 +9234,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9177 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9234 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9178 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9235 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9179 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9236 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9237 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9180 ioa_cfg->sdt_state = INACTIVE; 9238 ioa_cfg->sdt_state = INACTIVE;
9181 9239
9182 ipr_initialize_bus_attr(ioa_cfg); 9240 ipr_initialize_bus_attr(ioa_cfg);
@@ -9187,44 +9245,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9187 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 9245 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9188 if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 9246 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9189 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 9247 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9248 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9249 + ((sizeof(struct ipr_config_table_entry64)
9250 * ioa_cfg->max_devs_supported)));
9190 } else { 9251 } else {
9191 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 9252 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9192 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 9253 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9193 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 9254 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9194 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 9255 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9256 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9257 + ((sizeof(struct ipr_config_table_entry)
9258 * ioa_cfg->max_devs_supported)));
9195 } 9259 }
9260
9196 host->max_channel = IPR_MAX_BUS_TO_SCAN; 9261 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9197 host->unique_id = host->host_no; 9262 host->unique_id = host->host_no;
9198 host->max_cmd_len = IPR_MAX_CDB_LEN; 9263 host->max_cmd_len = IPR_MAX_CDB_LEN;
9199 host->can_queue = ioa_cfg->max_cmds; 9264 host->can_queue = ioa_cfg->max_cmds;
9200 pci_set_drvdata(pdev, ioa_cfg); 9265 pci_set_drvdata(pdev, ioa_cfg);
9201 9266
9202 p = &ioa_cfg->chip_cfg->regs; 9267 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9203 t = &ioa_cfg->regs; 9268 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9204 base = ioa_cfg->hdw_dma_regs; 9269 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9205 9270 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9206 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 9271 if (i == 0)
9207 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 9272 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9208 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 9273 else
9209 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 9274 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9210 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9211 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9212 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9213 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9214 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9215 t->ioarrin_reg = base + p->ioarrin_reg;
9216 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9217 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9218 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9219 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9220 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9221 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9222
9223 if (ioa_cfg->sis64) {
9224 t->init_feedback_reg = base + p->init_feedback_reg;
9225 t->dump_addr_reg = base + p->dump_addr_reg;
9226 t->dump_data_reg = base + p->dump_data_reg;
9227 t->endian_swap_reg = base + p->endian_swap_reg;
9228 } 9275 }
9229} 9276}
9230 9277
@@ -9247,6 +9294,26 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
9247 return NULL; 9294 return NULL;
9248} 9295}
9249 9296
9297/**
9298 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9299 * during probe time
9300 * @ioa_cfg: ioa config struct
9301 *
9302 * Return value:
9303 * None
9304 **/
9305static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9306{
9307 struct pci_dev *pdev = ioa_cfg->pdev;
9308
9309 if (pci_channel_offline(pdev)) {
9310 wait_event_timeout(ioa_cfg->eeh_wait_q,
9311 !pci_channel_offline(pdev),
9312 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9313 pci_restore_state(pdev);
9314 }
9315}
9316
9250static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) 9317static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9251{ 9318{
9252 struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; 9319 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
@@ -9261,6 +9328,7 @@ static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9261 vectors = err; 9328 vectors = err;
9262 9329
9263 if (err < 0) { 9330 if (err < 0) {
9331 ipr_wait_for_pci_err_recovery(ioa_cfg);
9264 pci_disable_msix(ioa_cfg->pdev); 9332 pci_disable_msix(ioa_cfg->pdev);
9265 return err; 9333 return err;
9266 } 9334 }
@@ -9284,6 +9352,7 @@ static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9284 vectors = err; 9352 vectors = err;
9285 9353
9286 if (err < 0) { 9354 if (err < 0) {
9355 ipr_wait_for_pci_err_recovery(ioa_cfg);
9287 pci_disable_msi(ioa_cfg->pdev); 9356 pci_disable_msi(ioa_cfg->pdev);
9288 return err; 9357 return err;
9289 } 9358 }
@@ -9438,19 +9507,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9438 9507
9439 ENTER; 9508 ENTER;
9440 9509
9441 if ((rc = pci_enable_device(pdev))) {
9442 dev_err(&pdev->dev, "Cannot enable adapter\n");
9443 goto out;
9444 }
9445
9446 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 9510 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9447
9448 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 9511 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9449 9512
9450 if (!host) { 9513 if (!host) {
9451 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); 9514 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9452 rc = -ENOMEM; 9515 rc = -ENOMEM;
9453 goto out_disable; 9516 goto out;
9454 } 9517 }
9455 9518
9456 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 9519 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9480,6 +9543,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9480 9543
9481 ioa_cfg->revid = pdev->revision; 9544 ioa_cfg->revid = pdev->revision;
9482 9545
9546 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9547
9483 ipr_regs_pci = pci_resource_start(pdev, 0); 9548 ipr_regs_pci = pci_resource_start(pdev, 0);
9484 9549
9485 rc = pci_request_regions(pdev, IPR_NAME); 9550 rc = pci_request_regions(pdev, IPR_NAME);
@@ -9489,22 +9554,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9489 goto out_scsi_host_put; 9554 goto out_scsi_host_put;
9490 } 9555 }
9491 9556
9557 rc = pci_enable_device(pdev);
9558
9559 if (rc || pci_channel_offline(pdev)) {
9560 if (pci_channel_offline(pdev)) {
9561 ipr_wait_for_pci_err_recovery(ioa_cfg);
9562 rc = pci_enable_device(pdev);
9563 }
9564
9565 if (rc) {
9566 dev_err(&pdev->dev, "Cannot enable adapter\n");
9567 ipr_wait_for_pci_err_recovery(ioa_cfg);
9568 goto out_release_regions;
9569 }
9570 }
9571
9492 ipr_regs = pci_ioremap_bar(pdev, 0); 9572 ipr_regs = pci_ioremap_bar(pdev, 0);
9493 9573
9494 if (!ipr_regs) { 9574 if (!ipr_regs) {
9495 dev_err(&pdev->dev, 9575 dev_err(&pdev->dev,
9496 "Couldn't map memory range of registers\n"); 9576 "Couldn't map memory range of registers\n");
9497 rc = -ENOMEM; 9577 rc = -ENOMEM;
9498 goto out_release_regions; 9578 goto out_disable;
9499 } 9579 }
9500 9580
9501 ioa_cfg->hdw_dma_regs = ipr_regs; 9581 ioa_cfg->hdw_dma_regs = ipr_regs;
9502 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; 9582 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9503 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; 9583 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9504 9584
9505 ipr_init_ioa_cfg(ioa_cfg, host, pdev); 9585 ipr_init_regs(ioa_cfg);
9506
9507 pci_set_master(pdev);
9508 9586
9509 if (ioa_cfg->sis64) { 9587 if (ioa_cfg->sis64) {
9510 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 9588 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9512,7 +9590,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9512 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); 9590 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9513 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9591 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9514 } 9592 }
9515
9516 } else 9593 } else
9517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9594 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9518 9595
@@ -9526,10 +9603,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9526 9603
9527 if (rc != PCIBIOS_SUCCESSFUL) { 9604 if (rc != PCIBIOS_SUCCESSFUL) {
9528 dev_err(&pdev->dev, "Write of cache line size failed\n"); 9605 dev_err(&pdev->dev, "Write of cache line size failed\n");
9606 ipr_wait_for_pci_err_recovery(ioa_cfg);
9529 rc = -EIO; 9607 rc = -EIO;
9530 goto cleanup_nomem; 9608 goto cleanup_nomem;
9531 } 9609 }
9532 9610
9611 /* Issue MMIO read to ensure card is not in EEH */
9612 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9613 ipr_wait_for_pci_err_recovery(ioa_cfg);
9614
9533 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { 9615 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9534 dev_err(&pdev->dev, "The max number of MSIX is %d\n", 9616 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9535 IPR_MAX_MSIX_VECTORS); 9617 IPR_MAX_MSIX_VECTORS);
@@ -9548,10 +9630,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9548 dev_info(&pdev->dev, "Cannot enable MSI.\n"); 9630 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9549 } 9631 }
9550 9632
9633 pci_set_master(pdev);
9634
9635 if (pci_channel_offline(pdev)) {
9636 ipr_wait_for_pci_err_recovery(ioa_cfg);
9637 pci_set_master(pdev);
9638 if (pci_channel_offline(pdev)) {
9639 rc = -EIO;
9640 goto out_msi_disable;
9641 }
9642 }
9643
9551 if (ioa_cfg->intr_flag == IPR_USE_MSI || 9644 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9552 ioa_cfg->intr_flag == IPR_USE_MSIX) { 9645 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9553 rc = ipr_test_msi(ioa_cfg, pdev); 9646 rc = ipr_test_msi(ioa_cfg, pdev);
9554 if (rc == -EOPNOTSUPP) { 9647 if (rc == -EOPNOTSUPP) {
9648 ipr_wait_for_pci_err_recovery(ioa_cfg);
9555 if (ioa_cfg->intr_flag == IPR_USE_MSI) { 9649 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9556 ioa_cfg->intr_flag &= ~IPR_USE_MSI; 9650 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9557 pci_disable_msi(pdev); 9651 pci_disable_msi(pdev);
@@ -9581,30 +9675,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9581 (unsigned int)num_online_cpus(), 9675 (unsigned int)num_online_cpus(),
9582 (unsigned int)IPR_MAX_HRRQ_NUM); 9676 (unsigned int)IPR_MAX_HRRQ_NUM);
9583 9677
9584 /* Save away PCI config space for use following IOA reset */
9585 rc = pci_save_state(pdev);
9586
9587 if (rc != PCIBIOS_SUCCESSFUL) {
9588 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9589 rc = -EIO;
9590 goto out_msi_disable;
9591 }
9592
9593 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 9678 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9594 goto out_msi_disable; 9679 goto out_msi_disable;
9595 9680
9596 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 9681 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9597 goto out_msi_disable; 9682 goto out_msi_disable;
9598 9683
9599 if (ioa_cfg->sis64)
9600 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9601 + ((sizeof(struct ipr_config_table_entry64)
9602 * ioa_cfg->max_devs_supported)));
9603 else
9604 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9605 + ((sizeof(struct ipr_config_table_entry)
9606 * ioa_cfg->max_devs_supported)));
9607
9608 rc = ipr_alloc_mem(ioa_cfg); 9684 rc = ipr_alloc_mem(ioa_cfg);
9609 if (rc < 0) { 9685 if (rc < 0) {
9610 dev_err(&pdev->dev, 9686 dev_err(&pdev->dev,
@@ -9612,6 +9688,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9612 goto out_msi_disable; 9688 goto out_msi_disable;
9613 } 9689 }
9614 9690
9691 /* Save away PCI config space for use following IOA reset */
9692 rc = pci_save_state(pdev);
9693
9694 if (rc != PCIBIOS_SUCCESSFUL) {
9695 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9696 rc = -EIO;
9697 goto cleanup_nolog;
9698 }
9699
9615 /* 9700 /*
9616 * If HRRQ updated interrupt is not masked, or reset alert is set, 9701 * If HRRQ updated interrupt is not masked, or reset alert is set,
9617 * the card is in an unknown state and needs a hard reset 9702 * the card is in an unknown state and needs a hard reset
@@ -9668,18 +9753,19 @@ out:
9668cleanup_nolog: 9753cleanup_nolog:
9669 ipr_free_mem(ioa_cfg); 9754 ipr_free_mem(ioa_cfg);
9670out_msi_disable: 9755out_msi_disable:
9756 ipr_wait_for_pci_err_recovery(ioa_cfg);
9671 if (ioa_cfg->intr_flag == IPR_USE_MSI) 9757 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9672 pci_disable_msi(pdev); 9758 pci_disable_msi(pdev);
9673 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9759 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9674 pci_disable_msix(pdev); 9760 pci_disable_msix(pdev);
9675cleanup_nomem: 9761cleanup_nomem:
9676 iounmap(ipr_regs); 9762 iounmap(ipr_regs);
9763out_disable:
9764 pci_disable_device(pdev);
9677out_release_regions: 9765out_release_regions:
9678 pci_release_regions(pdev); 9766 pci_release_regions(pdev);
9679out_scsi_host_put: 9767out_scsi_host_put:
9680 scsi_host_put(host); 9768 scsi_host_put(host);
9681out_disable:
9682 pci_disable_device(pdev);
9683 goto out; 9769 goto out;
9684} 9770}
9685 9771
@@ -10017,6 +10103,7 @@ MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10017 10103
10018static const struct pci_error_handlers ipr_err_handler = { 10104static const struct pci_error_handlers ipr_err_handler = {
10019 .error_detected = ipr_pci_error_detected, 10105 .error_detected = ipr_pci_error_detected,
10106 .mmio_enabled = ipr_pci_mmio_enabled,
10020 .slot_reset = ipr_pci_slot_reset, 10107 .slot_reset = ipr_pci_slot_reset,
10021}; 10108};
10022 10109
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c756ff0abc96..649d8f697147 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -231,6 +231,7 @@
231#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) 231#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
232#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 232#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
233#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 233#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
234#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
234#define IPR_PCI_RESET_TIMEOUT (HZ / 2) 235#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
235#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ) 236#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
236#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ) 237#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
@@ -1443,6 +1444,7 @@ struct ipr_ioa_cfg {
1443 u8 dump_timeout:1; 1444 u8 dump_timeout:1;
1444 u8 cfg_locked:1; 1445 u8 cfg_locked:1;
1445 u8 clear_isr:1; 1446 u8 clear_isr:1;
1447 u8 probe_done:1;
1446 1448
1447 u8 revid; 1449 u8 revid;
1448 1450
@@ -1521,6 +1523,7 @@ struct ipr_ioa_cfg {
1521 1523
1522 wait_queue_head_t reset_wait_q; 1524 wait_queue_head_t reset_wait_q;
1523 wait_queue_head_t msi_wait_q; 1525 wait_queue_head_t msi_wait_q;
1526 wait_queue_head_t eeh_wait_q;
1524 1527
1525 struct ipr_dump *dump; 1528 struct ipr_dump *dump;
1526 enum ipr_sdt_state sdt_state; 1529 enum ipr_sdt_state sdt_state;