author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 21:49:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 21:49:04 -0400
commit     1a0b6abaea78f73d9bc0a2f6df2d9e4c917cade1 (patch)
tree       faae6f0b72b00a0a4d44cadc607e576e1954a5bc /drivers
parent     3e75c6de1ac33fe3500f44573d9212dc82c99f59 (diff)
parent     b2bff6ceb61a9a21294f04057d30c9bb4910a88f (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "This patch consists of the usual driver updates (megaraid_sas,
  scsi_debug, qla2xxx, qla4xxx, lpfc, bnx2fc, be2iscsi, hpsa, ipr) plus
  an assortment of minor fixes and the first precursors of SCSI-MQ (the
  code path simplifications) and the bug fix for the USB oops on remove
  (which involves an infrastructure change, so is sent via the main
  tree with a delayed backport after a cycle in which it is shown to
  introduce no new bugs)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (196 commits)
  [SCSI] sd: Quiesce mode sense error messages
  [SCSI] add support for per-host cmd pools
  [SCSI] simplify command allocation and freeing a bit
  [SCSI] megaraid: simplify internal command handling
  [SCSI] ses: Use vpd information from scsi_device
  [SCSI] Add EVPD page 0x83 and 0x80 to sysfs
  [SCSI] Return VPD page length in scsi_vpd_inquiry()
  [SCSI] scsi_sysfs: Implement 'is_visible' callback
  [SCSI] hpsa: update driver version to 3.4.4-1
  [SCSI] hpsa: fix bad endif placement in RAID 5 mapper code
  [SCSI] qla2xxx: Fix build errors related to invalid print fields on some architectures.
  [SCSI] bfa: Replace large udelay() with mdelay()
  [SCSI] vmw_pvscsi: Some improvements in pvscsi driver.
  [SCSI] vmw_pvscsi: Add support for I/O requests coalescing.
  [SCSI] vmw_pvscsi: Fix pvscsi_abort() function.
  [SCSI] remove deprecated IRQF_DISABLED from SCSI
  [SCSI] bfa: Updating Maintainers email ids
  [SCSI] ipr: Add new CCIN definition for Grand Canyon support
  [SCSI] ipr: Format HCAM overlay ID 0x21
  [SCSI] ipr: Use pci_enable_msi_range() and pci_enable_msix_range()
  ...
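A theme running through this pull is the removal of the long-deprecated IRQF_DISABLED flag (see the NCR5380, aacraid, aha152x and arm/* hunks below). The flag has been a no-op since the genirq core started running all handlers with local interrupts disabled, so dropping it changes no behaviour. A minimal sketch of the before/after pattern, with a hypothetical handler and device name:

    /* before: IRQF_DISABLED OR'd into the request_irq() flags */
    ret = request_irq(irq, my_intr, IRQF_DISABLED | IRQF_SHARED, "mydrv", dev);

    /* after: only flags that still have an effect remain */
    ret = request_irq(irq, my_intr, IRQF_SHARED, "mydrv", dev);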
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/NCR5380.c | 2
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/aacraid/rx.c | 9
-rw-r--r--  drivers/scsi/aacraid/sa.c | 3
-rw-r--r--  drivers/scsi/aacraid/src.c | 4
-rw-r--r--  drivers/scsi/aha152x.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h | 1
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 7
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 11
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 121
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 10
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 13
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 105
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 23
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 22
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 11
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 4
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 7
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 19
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 1
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 46
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 8
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 59
-rw-r--r--  drivers/scsi/dtc.c | 2
-rw-r--r--  drivers/scsi/eata.c | 2
-rw-r--r--  drivers/scsi/eata_pio.c | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_log.c | 8
-rw-r--r--  drivers/scsi/g_NCR5380.c | 2
-rw-r--r--  drivers/scsi/gdth.c | 6
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/hpsa.c | 2618
-rw-r--r--  drivers/scsi/hpsa.h | 171
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 270
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 2
-rw-r--r--  drivers/scsi/in2000.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 367
-rw-r--r--  drivers/scsi/ipr.h | 22
-rw-r--r--  drivers/scsi/isci/init.c | 2
-rw-r--r--  drivers/scsi/isci/request.c | 8
-rw-r--r--  drivers/scsi/iscsi_boot_sysfs.c | 1
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 25
-rw-r--r--  drivers/scsi/libiscsi.c | 232
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 71
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 628
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 108
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 200
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 39
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 277
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 47
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 56
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 552
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 267
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 120
-rw-r--r--  drivers/scsi/megaraid.h | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 114
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 815
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 11
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 272
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 3
-rw-r--r--  drivers/scsi/pas16.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 38
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 105
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 12
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 3
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 12
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 193
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 134
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 65
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 426
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 93
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 50
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 252
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 57
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 171
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 65
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 909
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.h | 205
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 36
-rw-r--r--  drivers/scsi/qla4xxx/ql4_bsg.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 31
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 69
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 25
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 89
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 232
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 373
-rw-r--r--  drivers/scsi/scsi_debug.c | 141
-rw-r--r--  drivers/scsi/scsi_error.c | 6
-rw-r--r--  drivers/scsi/scsi_lib.c | 112
-rw-r--r--  drivers/scsi/scsi_scan.c | 115
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 242
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 1
-rw-r--r--  drivers/scsi/sd.c | 42
-rw-r--r--  drivers/scsi/sd.h | 6
-rw-r--r--  drivers/scsi/ses.c | 38
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/t128.c | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 2
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 242
-rw-r--r--  drivers/scsi/vmw_pvscsi.h | 19
-rw-r--r--  drivers/scsi/wd7000.c | 2
139 files changed, 10209 insertions, 2648 deletions
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 1e9d6ad9302b..bcd223868227 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -584,7 +584,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
 	NCR5380_setup(instance);
 
 	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
-		if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
+		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
 			trying_irqs |= mask;
 
 	timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9323d058706b..eaaf8705a5f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30200
+# define AAC_DRIVER_BUILD 30300
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dada38aeacc0..5c6a8703f535 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -480,7 +480,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
 
 static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
 {
-	u32 var;
+	u32 var = 0;
 
 	if (!(dev->supplement_adapter_info.SupportedOptions2 &
 			AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
@@ -500,13 +500,14 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
 		if (bled && (bled != -ETIMEDOUT))
 			return -EINVAL;
 	}
-	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
 		rx_writel(dev, MUnit.reserved2, 3);
 		msleep(5000); /* Delay 5 seconds */
 		var = 0x00000001;
 	}
-	if (var != 0x00000001)
+	if (bled && (var != 0x00000001))
 		return -EINVAL;
+	ssleep(5);
 	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
 		return -ENODEV;
 	if (startup_timeout < 300)
@@ -646,7 +647,7 @@ int _aac_rx_init(struct aac_dev *dev)
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 2244f315f33b..e66477c98240 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -387,8 +387,7 @@ int aac_sa_init(struct aac_dev *dev)
 		goto error_irq;
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED,
-			"aacraid", (void *)dev ) < 0) {
+			IRQF_SHARED, "aacraid", (void *)dev) < 0) {
 		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
 			name, instance);
 		goto error_iounmap;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7e17107643d4..9c65aed26212 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -647,7 +647,7 @@ int aac_src_init(struct aac_dev *dev)
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
@@ -804,7 +804,7 @@ int aac_srcv_init(struct aac_dev *dev)
 		goto error_iounmap;
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 3f7b6fee0a74..e86eb6a921fc 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -857,7 +857,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
 	SETPORT(SIMODE0, 0);
 	SETPORT(SIMODE1, 0);
 
-	if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
@@ -891,7 +891,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
 	SETPORT(SSTAT0, 0x7f);
 	SETPORT(SSTAT1, 0xef);
 
-	if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
index 9df9e2ce3538..8373447bd7d3 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -209,7 +209,6 @@ struct instruction {
 #define AIC_OP_JC16	0x9105
 #define AIC_OP_JNC16	0x9205
 #define AIC_OP_CALL16	0x9305
-#define AIC_OP_CALL16	0x9305
 
 /* Page extension is low three bits of second opcode byte. */
 #define AIC_OP_JMPF	0xA005
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f6a30b8e5f9..652b41b4ddbd 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
-	dma_addr_t dma_coherent_handle;
+
 	/*
 	********************************************************************
 	** here we need to tell iop 331 our freeccb.HighPart
 	** if freeccb.HighPart is not zero
 	********************************************************************
 	*/
-	dma_coherent_handle = acb->dma_coherent_handle;
-	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
-	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
 	/*
 	***********************************************************************
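The arcmsr hunk above also fixes a truncation bug: the old code cast the dma_addr_t to 32 bits first and then tried to shift the upper half out of the already-truncated value, so cdb_phyaddr_hi32 could never be non-zero. The kernel's lower_32_bits()/upper_32_bits() helpers operate on the full 64-bit handle instead. A standalone sketch of their semantics (the address value is hypothetical):

    #include <stdint.h>
    #include <assert.h>

    /* userspace equivalents of the kernel's lower/upper_32_bits() macros */
    static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
    static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t dma_handle = 0x0000000123456780ULL; /* hypothetical DMA address */
        assert(lo32(dma_handle) == 0x23456780u);
        assert(hi32(dma_handle) == 0x00000001u);
        return 0;
    }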
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba1869d366..059ff477a398 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 	ec->irqaddr	= ashost->fast + INT_REG;
 	ec->irqmask	= 0x0a;
 
-	ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+	ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
 	if (ret) {
 		printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778376c5..f8e060900052 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
 		goto out_unmap;
 	}
 
-	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+	ret = request_irq(host->irq, cumanascsi_intr, 0,
 			  "CumanaSCSI-1", host);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f29055b..abc66f5263ec 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
 		goto out_free;
 
 	ret = request_irq(ec->irq, cumanascsi_2_intr,
-			  IRQF_DISABLED, "cumanascsi2", info);
+			  0, "cumanascsi2", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593615e9..5e1b73e1b743 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
 		goto out_free;
 
 	ret = request_irq(ec->irq, powertecscsi_intr,
-			  IRQF_DISABLED, "powertec", info);
+			  0, "powertec", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 2e28f6c419fe..1bfb0bd01198 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -98,6 +98,14 @@ struct be_mcc_obj {
 	struct be_queue_info cq;
 };
 
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING	0x01
+#define MCC_TAG_STATE_TIMEOUT	0x02
+	uint8_t tag_state;
+	struct be_dma_mem tag_mem_state;
+};
+
 struct be_ctrl_info {
 	u8 __iomem *csr;
 	u8 __iomem *db; /* Door Bell */
@@ -122,6 +130,8 @@ struct be_ctrl_info {
 	unsigned short mcc_alloc_index;
 	unsigned short mcc_free_index;
 	unsigned int mcc_tag_available;
+
+	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
 };
 
 #include "be_cmds.h"
@@ -129,6 +139,7 @@ struct be_ctrl_info {
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 #define mcc_timeout		120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY	250
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)				\
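The beiscsi_mcc_tag_state structure added above is the hook for the timeout handling that the be_cmds.c changes below build on: when a waiter gives up on a mailbox command, it parks the command's DMA buffer in tag_mem_state and marks the tag TIMEOUT, so the completion ISR can free the buffer if firmware answers late. A much-simplified sketch of that ownership handoff (names hypothetical; the driver serializes the state changes with mbox_lock, omitted here):

    #include <stdlib.h>

    enum tag_state { TAG_COMPLETED, TAG_RUNNING, TAG_TIMEOUT };

    struct tag_ctx {
        enum tag_state state;
        void *buf;                   /* resource the waiter may abandon */
    };

    /* waiter side: the timed-out thread hands buffer ownership over */
    static void waiter_timed_out(struct tag_ctx *t, void *buf)
    {
        t->state = TAG_TIMEOUT;
        t->buf = buf;
    }

    /* ISR side: a late completion reclaims whatever was abandoned */
    static void on_completion(struct tag_ctx *t)
    {
        if (t->state == TAG_RUNNING) {
            /* waiter is still blocked: just wake it (not shown) */
        } else if (t->state == TAG_TIMEOUT) {
            free(t->buf);            /* waiter is gone; free here instead */
            t->state = TAG_COMPLETED;
        }
    }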
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 3338391b64de..1432ed5e9fc6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -138,7 +138,7 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
  * @phba: Driver private structure
  * @tag: Tag for the MBX Command
  * @wrb: the WRB used for the MBX Command
- * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
  *
  * Waits for MBX completion with the passed TAG.
  *
@@ -148,21 +148,26 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
  **/
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 		uint32_t tag, struct be_mcc_wrb **wrb,
-		void *cmd_hdr)
+		struct be_dma_mem *mbx_cmd_mem)
 {
 	int rc = 0;
 	uint32_t mcc_tag_response;
 	uint16_t status = 0, addl_status = 0, wrb_num = 0;
 	struct be_mcc_wrb *temp_wrb;
-	struct be_cmd_req_hdr *ioctl_hdr;
-	struct be_cmd_resp_hdr *ioctl_resp_hdr;
+	struct be_cmd_req_hdr *mbx_hdr;
+	struct be_cmd_resp_hdr *mbx_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
 	if (beiscsi_error(phba)) {
 		free_mcc_tag(&phba->ctrl, tag);
-		return -EIO;
+		return -EPERM;
 	}
 
+	/* Set MBX Tag state to Active */
+	spin_lock(&phba->ctrl.mbox_lock);
+	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+	spin_unlock(&phba->ctrl.mbox_lock);
+
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
 				phba->ctrl.mcc_wait[tag],
@@ -171,56 +176,71 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 				BEISCSI_HOST_MBX_TIMEOUT));
 
 	if (rc <= 0) {
+		struct be_dma_mem *tag_mem;
+		/* Set MBX Tag state to timeout */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Store resource addr to be freed later */
+		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+		if (mbx_cmd_mem) {
+			tag_mem->size = mbx_cmd_mem->size;
+			tag_mem->va = mbx_cmd_mem->va;
+			tag_mem->dma = mbx_cmd_mem->dma;
+		} else
+			tag_mem->size = 0;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Completion timed out\n");
-		rc = -EBUSY;
-
-		/* decrement the mccq used count */
-		atomic_dec(&phba->ctrl.mcc_obj.q.used);
-
-		goto release_mcc_tag;
-	} else
+		return -EBUSY;
+	} else {
 		rc = 0;
+		/* Set MBX Tag state to completed */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+	}
 
 	mcc_tag_response = phba->ctrl.mcc_numtag[tag];
 	status = (mcc_tag_response & CQE_STATUS_MASK);
 	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
 			CQE_STATUS_ADDL_SHIFT);
 
-	if (cmd_hdr) {
-		ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+	if (mbx_cmd_mem) {
+		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
 	} else {
 		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
 				CQE_STATUS_WRB_SHIFT;
 		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
-		ioctl_hdr = embedded_payload(temp_wrb);
+		mbx_hdr = embedded_payload(temp_wrb);
 
 		if (wrb)
 			*wrb = temp_wrb;
 	}
 
 	if (status || addl_status) {
-		beiscsi_log(phba, KERN_ERR,
+		beiscsi_log(phba, KERN_WARNING,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Failed for "
 			    "Subsys : %d Opcode : %d with "
 			    "Status : %d and Extd_Status : %d\n",
-			    ioctl_hdr->subsystem,
-			    ioctl_hdr->opcode,
+			    mbx_hdr->subsystem,
+			    mbx_hdr->opcode,
 			    status, addl_status);
 
 		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
 			beiscsi_log(phba, KERN_WARNING,
 				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 				    BEISCSI_LOG_CONFIG,
 				    "BC_%d : Insufficent Buffer Error "
 				    "Resp_Len : %d Actual_Resp_Len : %d\n",
-				    ioctl_resp_hdr->response_length,
-				    ioctl_resp_hdr->actual_resp_len);
+				    mbx_resp_hdr->response_length,
+				    mbx_resp_hdr->actual_resp_len);
 
 			rc = -EAGAIN;
 			goto release_mcc_tag;
@@ -319,6 +339,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 				    struct be_mcc_compl *compl)
 {
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	u16 compl_status, extd_status;
 	unsigned short tag;
 
@@ -338,7 +359,32 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
 	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
 	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
-	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+		wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+		struct be_dma_mem *tag_mem;
+		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+			    BEISCSI_LOG_CONFIG,
+			    "BC_%d : MBX Completion for timeout Command "
+			    "from FW\n");
+		/* Check if memory needs to be freed */
+		if (tag_mem->size)
+			pci_free_consistent(ctrl->pdev, tag_mem->size,
+					    tag_mem->va, tag_mem->dma);
+
+		/* Change tag state */
+		spin_lock(&phba->ctrl.mbox_lock);
+		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Free MCC Tag */
+		free_mcc_tag(ctrl, tag);
+	}
+
 	return 0;
 }
 
@@ -354,8 +400,23 @@ static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
 	return NULL;
 }
 
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on adapter state appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint32_t iscsi_err_flag;
+
+	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+		iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+	else
+		iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
 }
 
@@ -386,18 +447,6 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 	}
 }
 
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
-			      u16 num_popped)
-{
-	u32 val = 0;
-	val |= qid & DB_CQ_RING_ID_MASK;
-	if (arm)
-		val |= 1 << DB_CQ_REARM_SHIFT;
-	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
-	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
-
 int beiscsi_process_mcc(struct beiscsi_hba *phba)
 {
 	struct be_mcc_compl *compl;
@@ -428,7 +477,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
 	}
 
 	if (num)
-		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
 
 	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
 	return status;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 627ebbe0172c..7cf7f99ee442 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -103,7 +103,7 @@ struct be_mcc_compl {
 
 /********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF		/* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF		/* bits 0 - 15 */
 /* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */
 
@@ -709,7 +709,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-		uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+		uint32_t tag, struct be_mcc_wrb **wrb,
+		struct be_dma_mem *mbx_cmd_mem);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
@@ -1017,8 +1018,8 @@ struct be_mcc_wrb_context {
 	int *users_final_status;
 } __packed;
 
-#define DB_DEF_PDU_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK	0x3FFF	/* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 16 - 29 */
 #define DB_DEF_PDU_REARM_SHIFT		14
 #define DB_DEF_PDU_EVENT_SHIFT		15
 #define DB_DEF_PDU_CQPROC_SHIFT		16
@@ -1317,4 +1318,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 			u8 subsystem, u8 opcode, int cmd_len);
 
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 889066d9d6fb..a3df43324c98 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -793,7 +793,7 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
 		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_100MBPS:
-		ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+		ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_1GBPS:
 		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
@@ -1153,16 +1153,18 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 		return -EAGAIN;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			    "BS_%d : mgmt_open_connection Failed");
 
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-				    nonemb_cmd.va, nonemb_cmd.dma);
+		if (ret != -EBUSY)
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+
 		beiscsi_free_ep(beiscsi_ep);
-		return -EBUSY;
+		return ret;
 	}
 
 	ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
@@ -1359,6 +1361,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 	beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
 free_ep:
+	msleep(BEISCSI_LOGOUT_SYNC_DELAY);
 	beiscsi_free_ep(beiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5642a9b250c2..9be818f7b26d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -228,24 +228,25 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, num_invalidate;
+	int rc;
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!aborted_task || !aborted_task->sc) {
 		/* we raced */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
 
 	aborted_io_task = aborted_task->dd_data;
 	if (!aborted_io_task->scsi_cmnd) {
 		/* raced or invalid command */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	/* Invalidate WRB Posted for this Task */
 	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
 		      aborted_io_task->pwrb_handle->pwrb,
@@ -285,9 +286,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+
 	return iscsi_eh_abort(sc);
 }
 
@@ -303,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, i, num_invalidate;
+	int rc;
 
 	/* invalidate iocbs */
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return FAILED;
 	}
 	conn = session->leadconn;
@@ -338,7 +342,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 		num_invalidate++;
 		inv_tbl++;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	inv_tbl = phba->inv_tbl;
 
 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -363,9 +367,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
 	return iscsi_eh_device_reset(sc);
 }
 
@@ -674,8 +679,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
 	}
 
 	pci_set_master(pcidev);
-	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
-		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+	if (ret) {
+		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+			pci_disable_device(pcidev);
+			return ret;
+		} else {
+			ret = pci_set_consistent_dma_mask(pcidev,
+							  DMA_BIT_MASK(32));
+		}
+	} else {
+		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
 		if (ret) {
 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
 			pci_disable_device(pcidev);
@@ -804,14 +820,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_EQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_EQ_REARM_SHIFT;
 	if (clr_interrupt)
 		val |= 1 << DB_EQ_CLR_SHIFT;
 	if (event)
 		val |= 1 << DB_EQ_EVNT_SHIFT;
+
 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+	/* Setting lower order EQ_ID Bits */
+	val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order EQ_ID Bits */
+	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+		  DB_EQ_RING_ID_HIGH_MASK)
+		 << DB_EQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
 }
 
@@ -1093,15 +1118,25 @@ free_msix_irqs:
 	return ret;
 }
 
-static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
 			   unsigned int id, unsigned int num_processed,
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_CQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_CQ_REARM_SHIFT;
+
 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+	/* Setting lower order CQ_ID Bits */
+	val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order CQ_ID Bits */
+	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+		  DB_CQ_RING_ID_HIGH_MASK)
+		 << DB_CQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
@@ -1150,9 +1185,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
 		return 1;
 	}
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1342,8 +1377,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 		resid = csol_cqe->res_cnt;
 
 	if (!task->sc) {
-		if (io_task->scsi_cmnd)
+		if (io_task->scsi_cmnd) {
 			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 
 		return;
 	}
@@ -1380,6 +1417,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
 		conn->rxdata_octets += resid;
 unmap:
 	scsi_dma_unmap(io_task->scsi_cmnd);
+	io_task->scsi_cmnd = NULL;
 	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
 }
 
@@ -1568,7 +1606,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
 	pwrb = pwrb_handle->pwrb;
 	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	switch (type) {
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
@@ -1607,7 +1645,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
 		break;
 	}
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 }
 
 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
@@ -4360,12 +4398,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 		goto boot_freemem;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
 			    "BM_%d : beiscsi_get_session_info Failed");
-		goto boot_freemem;
+
+		if (ret != -EBUSY)
+			goto boot_freemem;
+		else
+			return ret;
 	}
 
 	session_resp = nonemb_cmd.va ;
@@ -4625,6 +4667,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 			spin_unlock(&phba->io_sgl_lock);
 			io_task->psgl_handle = NULL;
 		}
+
+		if (io_task->scsi_cmnd) {
+			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 	} else {
 		if (!beiscsi_conn->login_in_progress)
 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
@@ -4646,9 +4693,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	 * login/startup related tasks.
 	 */
 	beiscsi_conn->login_in_progress = 0;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	beiscsi_cleanup_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 
 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
@@ -5273,6 +5320,8 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
 		return;
 	}
 
+	phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
 	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	pci_disable_device(pcidev);
 }
@@ -5594,6 +5643,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 		phba->ctrl.mcc_tag[i] = i + 1;
 		phba->ctrl.mcc_numtag[i + 1] = 0;
 		phba->ctrl.mcc_tag_available++;
+		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+		       sizeof(struct beiscsi_mcc_tag_state));
 	}
 
 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 31fa27b4a9b2..9380b55bdeaf 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.0.659.0"
+#define BUILD_STR		"10.2.125.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -97,9 +97,14 @@
 
 #define INVALID_SESS_HANDLE	0xFFFFFFFF
 
+/**
+ * Adapter States
+ **/
 #define BE_ADAPTER_LINK_UP	0x001
 #define BE_ADAPTER_LINK_DOWN	0x002
 #define BE_ADAPTER_PCI_ERR	0x004
+#define BE_ADAPTER_STATE_SHUTDOWN	0x008
+
 
 #define BEISCSI_CLEAN_UNLOAD	0x01
 #define BEISCSI_EEH_UNLOAD	0x02
@@ -135,11 +140,15 @@
 #define DB_RXULP0_OFFSET 0xA0
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET			DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_LOW_MASK		0x1FF	/* bits 0 - 8 */
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT			(9)	/* bit 9 */
 /* Must be 1 */
 #define DB_EQ_EVNT_SHIFT		(10)	/* bit 10 */
+/* Higher Order EQ_ID bit */
+#define DB_EQ_RING_ID_HIGH_MASK	0x1F	/* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT	11
+#define DB_EQ_HIGH_FEILD_SHIFT	9
 /* Number of event entries processed */
 #define DB_EQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -147,7 +156,12 @@
 
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET			0x120
-#define DB_CQ_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_LOW_MASK		0x3FF	/* bits 0 - 9 */
+/* Higher Order CQ_ID bit */
+#define DB_CQ_RING_ID_HIGH_MASK	0x1F	/* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT	11
+#define DB_CQ_HIGH_FEILD_SHIFT	10
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -821,6 +835,9 @@ void beiscsi_process_all_cqs(struct work_struct *work);
 void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 				     struct iscsi_task *task);
 
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+		     unsigned int id, unsigned int num_processed,
+		     unsigned char rearm, unsigned char event);
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
 	return phba->ue_detected || phba->fw_timeout;
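The doorbell changes above widen the ring id beyond the 9/10 bits the old single mask allowed: the low-order id bits stay in bits 0-8 (EQ) or 0-9 (CQ) as before, and the overflow bits now land in bits 11-15 of the doorbell word. Using only the constants from this hunk, the CQ id packing that hwi_ring_cq_db() performs can be sketched standalone as:

    #include <stdint.h>

    #define DB_CQ_RING_ID_LOW_MASK   0x3FF /* bits 0 - 9 */
    #define DB_CQ_RING_ID_HIGH_MASK  0x1F  /* bits 11 - 15 */
    #define DB_CQ_HIGH_SET_SHIFT     11
    #define DB_CQ_HIGH_FEILD_SHIFT   10    /* spelling as in the driver */

    /* pack a (possibly >10-bit) CQ id into the doorbell id fields */
    static uint32_t pack_cq_id(uint32_t id)
    {
        uint32_t val = 0;

        val |= id & DB_CQ_RING_ID_LOW_MASK;              /* low bits 0-9 */
        val |= ((id >> DB_CQ_HIGH_FEILD_SHIFT) &
                DB_CQ_RING_ID_HIGH_MASK) << DB_CQ_HIGH_SET_SHIFT; /* bits 11-15 */
        return val;
    }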
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index b2fcac78feaa..088bdf752cfa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -828,22 +828,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 
-	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+
+	if (resp_buf)
+		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
 	if (rc) {
-		/* Check if the IOCTL needs to be re-issued */
+		/* Check if the MBX Cmd needs to be re-issued */
 		if (rc == -EAGAIN)
 			return rc;
 
-		beiscsi_log(phba, KERN_ERR,
+		beiscsi_log(phba, KERN_WARNING,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
 
-		goto free_cmd;
+		if (rc != -EBUSY)
+			goto free_cmd;
+		else
+			return rc;
 	}
-
-	if (resp_buf)
-		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-
 free_cmd:
 	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
 			    nonemb_cmd->va, nonemb_cmd->dma);
@@ -1348,7 +1351,6 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
 {
 	int rc;
 	unsigned int tag;
-	struct be_mcc_wrb *wrb = NULL;
 
 	tag = be_cmd_set_vlan(phba, vlan_tag);
 	if (!tag) {
@@ -1358,7 +1360,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
 		return -EBUSY;
 	}
 
-	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	if (rc) {
 		beiscsi_log(phba, KERN_ERR,
 			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e15de6e..315d6d6dcfc8 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3878,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
 	bfa_trc(sfp, sfp->data_valid);
 	if (sfp->data_valid) {
 		u32	size = sizeof(struct sfp_mem_s);
-		u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+		u8 *des = (u8 *) &(sfp->sfpmem);
 		memcpy(des, sfp->dbuf_kva, size);
 	}
 	/*
@@ -6851,7 +6851,7 @@ static u32
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg_u	dev_status;
-	u32			status;
+	int			status;
 	u32			ret_status;
 	int			i;
 
@@ -6899,7 +6899,7 @@ static u32
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 		     char *buf)
 {
-	u32 status;
+	int status;
 
 	/*
 	 * len must be mutiple of 4 and not exceeding fifo size
@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
 	while (!bfa_raw_sem_get(bar)) {
 		if (--n <= 0)
 			return BFA_STATUS_BADFLASH;
-		udelay(10000);
+		mdelay(10);
 	}
 	return BFA_STATUS_OK;
 }
@@ -7021,7 +7021,8 @@ bfa_status_t
7021bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, 7021bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
7022 u32 len) 7022 u32 len)
7023{ 7023{
7024 u32 n, status; 7024 u32 n;
7025 int status;
7025 u32 off, l, s, residue, fifo_sz; 7026 u32 off, l, s, residue, fifo_sz;
7026 7027
7027 residue = len; 7028 residue = len;
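
The u32-to-int changes in these flash helpers matter because the routines can return negative BFA error codes; stored in an unsigned local, the sign is lost and any status < 0 test becomes dead code. A standalone illustration of the bug class (check() is a hypothetical stand-in, not a bfa function):

#include <stdio.h>
#include <stdint.h>

static int check(void)
{
	return -1;			/* models a negative BFA error code */
}

int main(void)
{
	uint32_t ustatus = check();	/* -1 wraps to 0xffffffff */
	int sstatus = check();

	/* The unsigned comparison is always false, so the error is missed. */
	printf("unsigned: error %s\n", ustatus < 0 ? "caught" : "missed");
	printf("signed:   error %s\n", sstatus < 0 ? "caught" : "missed");
	return 0;
}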
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 157f6044a9bb..8994fb857ee9 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2304,8 +2304,10 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	if (bfa_fcport_is_dport(&bfad->bfa))
+	if (bfa_fcport_is_dport(&bfad->bfa)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		return BFA_STATUS_DPORT_ERR;
+	}
 
 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
 	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
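
The bfad_bsg.c hunk is a classic lock-leak fix: an early return taken while bfad_lock was still held, deadlocking the next caller. A tiny pthread model of the bug and the fix; cfg_trunk() and is_dport are stand-ins for the driver's function and bfa_fcport_is_dport(), not real symbols:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int is_dport = 1;		/* models bfa_fcport_is_dport() */

static int cfg_trunk(void)
{
	pthread_mutex_lock(&lock);
	if (is_dport) {
		/* Before the fix a bare 'return -1;' here leaked the lock. */
		pthread_mutex_unlock(&lock);
		return -1;
	}
	/* ... modify trunk configuration under the lock ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("first call:  %d\n", cfg_trunk());
	printf("second call: %d\n", cfg_trunk());  /* would hang pre-fix */
	return 0;
}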
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9967f9c14851..f067332bf763 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -73,9 +73,14 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 
 		break;
 
-	case BFI_IOIM_STS_ABORTED:
 	case BFI_IOIM_STS_TIMEDOUT:
+		host_status = DID_TIME_OUT;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	case BFI_IOIM_STS_PATHTOV:
+		host_status = DID_TRANSPORT_DISRUPTED;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	default:
 		host_status = DID_ERROR;
 		cmnd->result = ScsiResult(host_status, 0);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1ebf3fb683e6..6a976657b475 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.4.1"
+#define BNX2FC_VERSION		"2.4.2"
 
 #define PFX			"bnx2fc: "
 
@@ -367,6 +367,7 @@ struct bnx2fc_rport {
 	atomic_t num_active_ios;
 	u32 flush_in_prog;
 	unsigned long timestamp;
+	unsigned long retry_delay_timestamp;
 	struct list_head free_task_list;
 	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
 	struct list_head active_cmd_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9b948505d118..6287f6a8b79d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Sep 17, 2013"
+#define DRV_MODULE_RELDATE	"Dec 11, 2013"
 
 
 static char version[] =
@@ -850,6 +850,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 				__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
+
+		/* Ensure ALL destroy work has been completed before return */
+		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -2389,6 +2392,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
 			__bnx2fc_destroy(interface);
 	mutex_unlock(&bnx2fc_dev_lock);
 
+	/* Ensure ALL destroy work has been completed before return */
+	flush_workqueue(bnx2fc_wq);
+
 	bnx2fc_ulp_stop(hba);
 	/* unregister cnic device */
 	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
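
Both flush_workqueue() additions enforce the same ordering rule: every queued destroy work item must have finished before teardown continues into state that work might still touch. A minimal pthread sketch of the rule, with a joined thread standing in for the bnx2fc_wq flush:

#include <pthread.h>
#include <stdio.h>

static void *destroy_work(void *arg)
{
	(void)arg;
	/* ... tear down one interface, as the queued work item would ... */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, destroy_work, NULL);
	/* Equivalent of flush_workqueue(bnx2fc_wq): do not continue into
	 * ulp_stop()/unregister until queued destroy work has completed. */
	pthread_join(worker, NULL);
	printf("all destroy work flushed; safe to continue teardown\n");
	return 0;
}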
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e9279a8c1e1c..32a5e0a2a669 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1871,7 +1871,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
 		rc = SCSI_MLQUEUE_TARGET_BUSY;
 		goto exit_qcmd;
 	}
-
+	if (tgt->retry_delay_timestamp) {
+		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+			tgt->retry_delay_timestamp = 0;
+		} else {
+			/* If retry_delay timer is active, flow off the ML */
+			rc = SCSI_MLQUEUE_TARGET_BUSY;
+			goto exit_qcmd;
+		}
+	}
 	io_req = bnx2fc_cmd_alloc(tgt);
 	if (!io_req) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1961,6 +1969,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 				" fcp_resid = 0x%x\n",
 				io_req->cdb_status, io_req->fcp_resid);
 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+			    io_req->cdb_status == SAM_STAT_BUSY) {
+				/* Set the jiffies + retry_delay_timer * 100ms
+				   for the rport/tgt */
+				tgt->retry_delay_timestamp = jiffies +
+					fcp_rsp->retry_delay_timer * HZ / 10;
+			}
+
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
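
The new retry_delay_timestamp logic is the standard jiffies pattern: record a future tick count when the target reports TASK SET FULL or BUSY, then gate queuecommand with time_after(), whose unsigned subtraction stays correct across counter wraparound. A self-contained userspace model of that comparison; fake_jiffies and HZ here are stand-ins for the kernel symbols:

#include <stdio.h>
#include <stdbool.h>

#define HZ 1000UL			/* ticks per second, assumed */
static unsigned long fake_jiffies;	/* models the kernel's jiffies */

/* Wraparound-safe "a is after b", as the kernel's time_after() does it. */
static bool time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

struct tgt { unsigned long retry_delay_timestamp; };

/* Returns true if the command may be queued, false to flow off the ML. */
static bool may_queue(struct tgt *tgt)
{
	if (tgt->retry_delay_timestamp) {
		if (time_after_ul(fake_jiffies, tgt->retry_delay_timestamp))
			tgt->retry_delay_timestamp = 0;	/* delay expired */
		else
			return false;			/* still delayed */
	}
	return true;
}

int main(void)
{
	struct tgt t = { 0 };

	/* Target reported BUSY with retry_delay_timer == 5, i.e. 500 ms. */
	t.retry_delay_timestamp = fake_jiffies + 5 * HZ / 10;

	fake_jiffies += HZ / 4;		/* 250 ms later: still delayed */
	printf("t+250ms: %s\n", may_queue(&t) ? "queue" : "busy");
	fake_jiffies += HZ / 2;		/* 750 ms total: delay expired */
	printf("t+750ms: %s\n", may_queue(&t) ? "queue" : "busy");
	return 0;
}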
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d9bae5672273..6870cf6781d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -386,6 +386,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
 	tgt->rq_prod_idx = 0x8000;
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
+	tgt->retry_delay_timestamp = 0;
 
 	if (rdata->flags & FC_RP_FLAGS_RETRY &&
 	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index b87a1933f880..b5ffd280a1ae 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
 	u32 datalen = 0;
 
 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task)
@@ -1432,7 +1432,7 @@ done:
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
 			     conn->data, datalen);
 fail:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
 	int pad_len;
 
 	login = (struct bnx2i_login_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
@@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
 			bnx2i_conn->gen_pdu.resp_buf,
 			bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
 	int pad_len;
 
 	text = (struct bnx2i_text_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
 		goto done;
@@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
 			bnx2i_conn->gen_pdu.resp_wr_ptr -
 			bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
 	struct iscsi_tm_rsp *resp_hdr;
 
 	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
 	if (!task)
@@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
 
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
 	struct iscsi_logout_rsp *resp_hdr;
 
 	logout = (struct bnx2i_logout_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
 	if (!task)
@@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
 
 	bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
 	struct iscsi_task *task;
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
 	if (task)
 		__iscsi_put_task(task);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = nop_in->op_code;
@@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
 	}
 done:
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	return tgt_async_nop;
 }
@@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
 		return;
 	}
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
 	resp_hdr->opcode = async_cqe->op_code;
@@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
 
 	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
 			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 
@@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
 	} else
 		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = reject->op_code;
@@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
 	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
 			     reject->data_length);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
 	struct iscsi_task *task;
 
 	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
 	if (!task)
 		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	complete(&bnx2i_conn->cmd_cleanup_cmpl);
 }
 
@@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
 	int rc = 0;
 	int cpu;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task || !task->sc) {
-		spin_unlock(&session->lock);
+		spin_unlock(&session->back_lock);
 		return -EINVAL;
 	}
 	sc = task->sc;
@@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
 	else
 		cpu = sc->request->cpu;
 
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	p = &per_cpu(bnx2i_percpu, cpu);
 	spin_lock(&p->p_work_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c8b0aff5bbd4..166543f7ef55 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1170,10 +1170,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
 	if (task->state == ISCSI_TASK_ABRT_TMF) {
 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
 
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->back_lock);
 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
-		spin_lock_bh(&conn->session->lock);
+		spin_lock_bh(&conn->session->back_lock);
 	}
 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
 }
@@ -2060,7 +2060,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 		goto out;
 
 	if (session) {
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
@@ -2076,7 +2076,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
 		} else
 			close = 1;
 
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 	}
 
 	bnx2i_ep->state = EP_STATE_DISCONN_START;
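
These bnx2i hunks track a libiscsi change that splits the single session->lock into a frwd_lock for the submission path and a back_lock for the completion path, so queuecommand and completion processing no longer serialize on one spinlock. A minimal userspace sketch of the split, with pthread mutexes standing in for the two spinlocks and simple counters for the per-side state:

#include <pthread.h>
#include <stdio.h>

struct session {
	pthread_mutex_t frwd_lock;	/* guards submission-side state */
	pthread_mutex_t back_lock;	/* guards completion-side state */
	int sent, completed;
};

static void submit(struct session *s)
{
	pthread_mutex_lock(&s->frwd_lock);
	s->sent++;			/* touch submit-side state only */
	pthread_mutex_unlock(&s->frwd_lock);
}

static void complete_one(struct session *s)
{
	pthread_mutex_lock(&s->back_lock);
	s->completed++;			/* touch completion-side state only */
	pthread_mutex_unlock(&s->back_lock);
}

int main(void)
{
	struct session s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 0, 0
	};

	submit(&s);			/* neither path blocks the other */
	complete_one(&s);
	printf("sent=%d completed=%d\n", s.sent, s.completed);
	return 0;
}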
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a9f84238a53..e8ee5e5fe0ef 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -175,52 +175,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
 			sizeof(struct fw_ofld_tx_data_wr));
 }
 
-
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct cxgbi_device *cdev,
-				struct l2t_entry *l2t)
-{
-	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (lldi->filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			      struct l2t_entry *e)
 {
@@ -248,8 +202,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		struct cpl_act_open_req *req =
 				(struct cpl_act_open_req *)skb->head;
 
-		req = (struct cpl_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 							    qid_atid));
@@ -258,7 +210,9 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t));
 		opt2 |= 1 << 22;
 		req->opt2 = cpu_to_be32(opt2);
 
@@ -271,8 +225,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		struct cpl_t5_act_open_req *req =
 				(struct cpl_t5_act_open_req *)skb->head;
 
-		req = (struct cpl_t5_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 							    qid_atid));
@@ -281,7 +233,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be64(V_FILTER_TUPLE(
+					cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t)));
 		opt2 |= 1 << 31;
 		req->opt2 = cpu_to_be32(opt2);
 
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index d01f01604140..eb29fe7eaf49 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,7 @@ found:
 	/* With interrupts enabled, it will sometimes hang when doing heavy
	 * reads. So better not enable them until I figure it out. */
 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+		if (request_irq(instance->irq, dtc_intr, 0,
 				"dtc", instance)) {
 			printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 94de88955a99..ebf57364df91 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1221,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
 
 	/* Board detected, allocate its IRQ */
 	if (request_irq(irq, do_interrupt_handler,
-			IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+			(subversion == ESA) ? IRQF_SHARED : 0,
 			driver_name, (void *)&sha[j])) {
 		printk("%s: unable to allocate IRQ %u, detaching.\n", name,
 		       irq);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 1663173cdb91..8319d2b417b8 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -687,7 +687,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev
 		return 0;
 
 	if (!reg_IRQ[gc->IRQ]) {	/* Interrupt already registered ? */
-		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
+		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
 			reg_IRQ[gc->IRQ]++;
 			if (!gc->IRQ_TR)
 				reg_IRQL[gc->IRQ] = 1;	/* IRQ is edge triggered */
@@ -921,7 +921,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
 
 	for (i = 0; i < MAXIRQ; i++)
 		if (reg_IRQ[i])
-			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
+			request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
 
 	HBA_ptr = first_HBA;
 
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index b9750e296d71..6776931e25d4 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -231,7 +231,7 @@ use_legacy_interrupts:
 
 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
 {
-	unsigned long flags = IRQF_DISABLED;
+	unsigned long flags = 0;
 
 	if (a->intr_mode == INTR_MODE_LEGACY)
 		flags |= IRQF_SHARED;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 9bf285df58dd..a82030aa8577 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -165,13 +165,9 @@ static int esas2r_log_master(const long level,
 
 		/*
		 * Put a line break at the end of the formatted string so that
-		 * we don't wind up with run-on messages.  only append if there
-		 * is enough space in the buffer.
+		 * we don't wind up with run-on messages.
		 */
-		if (strlen(event_buffer) < buflen)
-			strcat(buffer, "\n");
-
-		printk(event_buffer);
+		printk("%s\n", event_buffer);
 
 		spin_unlock_irqrestore(&event_buffer_lock, flags);
 	}
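
The esas2r_log change closes a format-string hole as well as simplifying the code: printk(event_buffer) parses any '%' in the logged data as a conversion specifier and reads garbage varargs, while printk("%s\n", event_buffer) treats the buffer purely as data. The same hazard, shown with printf:

#include <stdio.h>

int main(void)
{
	const char *msg = "device 100% ready";	/* data, not a format */

	/* printf(msg) would interpret "% r" as a conversion: undefined
	 * behavior. Passing it through "%s" is always safe. */
	printf("%s\n", msg);
	return 0;
}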
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5cec6c60ca22..7176365e916b 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -461,7 +461,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
 
 	if (instance->irq != SCSI_IRQ_NONE)
 		if (request_irq(instance->irq, generic_NCR5380_intr,
-				IRQF_DISABLED, "NCR5380", instance)) {
+				0, "NCR5380", instance)) {
 			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
 		}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ce5ef0190bad..0f1ae13ce7c7 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4711,7 +4711,7 @@ static int __init gdth_isa_probe_one(u32 isa_bios)
 	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
 		isa_bios, ha->irq, ha->drq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-ISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4843,7 +4843,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
 	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
 		eisa_slot >> 12, ha->irq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-EISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4979,7 +4979,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
 		ha->irq);
 
 	error = request_irq(ha->irq, gdth_interrupt,
-			    IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+			    IRQF_SHARED, "gdth", ha);
 	if (error) {
 		printk("GDT-PCI: Unable to allocate IRQ\n");
 		goto out_host_put;
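
The IRQF_DISABLED deletions across dtc, eata, eata_pio, esas2r, g_NCR5380 and gdth are behavior-neutral: the genirq core has run all handlers with interrupts disabled for years, so the flag is documented as a NOOP and passing 0 (or just the remaining flags) is equivalent. A small sketch of a core that simply ignores the bit; the flag values mirror headers of that era, and setup_irq_flags() is invented for the demo:

#include <stdio.h>

#define IRQF_DISABLED 0x00000020u	/* documented NOOP before removal */
#define IRQF_SHARED   0x00000080u

/* Models the genirq core: handlers always run with IRQs off, so the only
 * flag this stub acts on is IRQF_SHARED. */
static void setup_irq_flags(unsigned int flags)
{
	printf("shared: %s, IRQF_DISABLED honoured: no\n",
	       (flags & IRQF_SHARED) ? "yes" : "no");
}

int main(void)
{
	setup_irq_flags(IRQF_DISABLED | IRQF_SHARED);	/* old call sites */
	setup_irq_flags(IRQF_SHARED);			/* after the cleanup */
	return 0;
}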
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f28ea070d3df..3cbb57a8b846 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -398,7 +398,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->ordered_tag = sht->ordered_tag;
 	shost->no_write_same = sht->no_write_same;
 
-	if (shost_eh_deadline == -1)
+	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
 		shost->eh_deadline = -1;
 	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
 		shost_printk(KERN_WARNING, shost,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 868318a7067c..8cf4a0c69baf 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
 #include <linux/string.h>
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
-#include <linux/kthread.h>
 #include <linux/jiffies.h>
+#include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -118,6 +118,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -163,6 +168,11 @@ static struct board_type products[] = {
 	{0x21C7103C, "Smart Array", &SA5_access},
 	{0x21C8103C, "Smart Array", &SA5_access},
 	{0x21C9103C, "Smart Array", &SA5_access},
+	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -182,8 +192,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type);
+#define VPD_PAGE (1 << 8)
 
 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
@@ -204,7 +215,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
 	struct CommandList *c);
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
-	int nsgs, int *bucket_map);
+	int nsgs, int min_blocks, int *bucket_map);
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 	int wait_for_ready);
 static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -280,6 +297,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
 	return 1;
 }
 
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int status, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &status) != 1)
+		return -EINVAL;
+	h = shost_to_hba(shost);
+	h->acciopath_status = !!status;
+	dev_warn(&h->pdev->dev,
+		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
+		h->acciopath_status ? "enabled" : "disabled");
+	return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int debug_level, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+		return -EINVAL;
+	if (debug_level < 0)
+		debug_level = 0;
+	h = shost_to_hba(shost);
+	h->raid_offload_debug = debug_level;
+	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+		h->raid_offload_debug);
+	return count;
+}
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -327,6 +393,17 @@ static ssize_t host_show_transport_mode(struct device *dev,
 			"performant" : "simple");
 }
 
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+		(h->acciopath_status == 1) ? "enabled" : "disabled");
+}
+
 /* List of controllers which cannot be hard reset on kexec with reset_devices */
 static u32 unresettable_controller[] = {
 	0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"1(ADM)", "UNKNOWN"
 };
+#define HPSA_RAID_0	0
+#define HPSA_RAID_4	1
+#define HPSA_RAID_1	2	/* also used for RAID 10 */
+#define HPSA_RAID_5	3	/* also used for RAID 50 */
+#define HPSA_RAID_51	4
+#define HPSA_RAID_6	5	/* also used for RAID 60 */
+#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 
 static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@ static ssize_t unique_id_show(struct device *dev,
 			sn[12], sn[13], sn[14], sn[15]);
 }
 
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	int offload_enabled;
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	offload_enabled = hdev->offload_enabled;
+	spin_unlock_irqrestore(&h->lock, flags);
+	return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+	host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+	host_show_hp_ssd_smart_path_status,
+	host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+	host_store_raid_offload_debug);
 static DEVICE_ATTR(firmware_revision, S_IRUGO,
 	host_show_firmware_revision, NULL);
 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
 	&dev_attr_raid_level,
 	&dev_attr_lunid,
 	&dev_attr_unique_id,
+	&dev_attr_hp_ssd_smart_path_enabled,
 	NULL,
 };
 
@@ -530,6 +644,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 	&dev_attr_commands_outstanding,
 	&dev_attr_transport_mode,
 	&dev_attr_resettable,
+	&dev_attr_hp_ssd_smart_path_status,
+	&dev_attr_raid_offload_debug,
 	NULL,
 };
 
@@ -570,6 +686,9 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
 	struct reply_pool *rq = &h->reply_queue[q];
 	unsigned long flags;
 
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return h->access.command_completed(h, q);
+
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h, q);
 
@@ -590,6 +709,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
 	return a;
 }
 
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
 /* set_performant_mode: Modify the tag for cciss performant
  * set bit 0 for pull model, bits 3-1 for block fetch
  * register number
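
A short worked example of the tag encoding the new comment block describes: the low bits of the command's bus address carry the performant-mode flag, the block-fetch-table index, and (for ioaccel1) a command type. The base address and fetch-table index below are made up; only IOACCEL1_BUSADDR_CMDTYPE mirrors the driver's constant:

#include <stdio.h>
#include <stdint.h>

#define IOACCEL1_BUSADDR_CMDTYPE (6u << 4)	/* bits 4-6 = 110b */

int main(void)
{
	uint32_t busaddr = 0x12340000;	/* command bus address, aligned */
	uint32_t fetch_entry = 3;	/* block fetch table index */

	/* Normal performant mode: bit 0 set, entry in bits 1-3. */
	uint32_t perf = busaddr | 1u | (fetch_entry << 1);

	/* ioaccel1: same, plus command type 110b in bits 4-6. */
	uint32_t ioaccel1 = busaddr | 1u | (fetch_entry << 1) |
			    IOACCEL1_BUSADDR_CMDTYPE;

	printf("performant tag: 0x%08x\n", perf);	/* 0x12340007 */
	printf("ioaccel1 tag:   0x%08x\n", ioaccel1);	/* 0x12340067 */
	return 0;
}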
@@ -598,12 +743,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
 {
 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-		if (likely(h->msix_vector))
+		if (likely(h->msix_vector > 0))
 			c->Header.ReplyQueue =
 				raw_smp_processor_id() % h->nreply_queues;
 	}
 }
 
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit (bit 0)
+	 *  - pull count (bits 1-3)
+	 *  - command type (bits 4-6)
+	 */
+	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+					IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit not used in ioaccel mode 2
+	 *  - pull count (bits 0-3)
+	 *  - command type isn't needed for ioaccel2
+	 */
+	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
 static int is_firmware_flash_cmd(u8 *cdb)
 {
 	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
@@ -638,7 +818,16 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 {
 	unsigned long flags;
 
-	set_performant_mode(h, c);
+	switch (c->cmd_type) {
+	case CMD_IOACCEL1:
+		set_ioaccel1_performant_mode(h, c);
+		break;
+	case CMD_IOACCEL2:
+		set_ioaccel2_performant_mode(h, c);
+		break;
+	default:
+		set_performant_mode(h, c);
+	}
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
 
 	/* Raid level changed. */
 	h->dev[entry]->raid_level = new_entry->raid_level;
+
+	/* Raid offload parameters changed. */
+	h->dev[entry]->offload_config = new_entry->offload_config;
+	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+	h->dev[entry]->raid_map = new_entry->raid_map;
+
 	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
 		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
 		new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
 	 */
 	if (dev1->raid_level != dev2->raid_level)
 		return 1;
+	if (dev1->offload_config != dev2->offload_config)
+		return 1;
+	if (dev1->offload_enabled != dev2->offload_enabled)
+		return 1;
 	return 0;
 }
 
@@ -932,6 +1133,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 				return DEVICE_UPDATED;
 			return DEVICE_SAME;
 		} else {
+			/* Keep offline devices offline */
+			if (needle->volume_offline)
+				return DEVICE_NOT_FOUND;
 			return DEVICE_CHANGED;
 		}
 	}
@@ -940,6 +1144,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
 	return DEVICE_NOT_FOUND;
 }
 
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+	unsigned char scsi3addr[])
+{
+	struct offline_device_entry *device;
+	unsigned long flags;
+
+	/* Check to see if device is already on the list */
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_for_each_entry(device, &h->offline_device_list, offline_list) {
+		if (memcmp(device->scsi3addr, scsi3addr,
+			sizeof(device->scsi3addr)) == 0) {
+			spin_unlock_irqrestore(&h->offline_device_lock, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+	/* Device is not on the list, add it. */
+	device = kmalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+		return;
+	}
+	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_add_tail(&device->offline_list, &h->offline_device_list);
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *sd)
+{
+	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+	switch (sd->volume_offline) {
+	case HPSA_LV_OK:
+		break;
+	case HPSA_LV_UNDERGOING_ERASE:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_RPI:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_RPI:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_NO_KEY:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	}
+}
+
 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *sd[], int nsds)
 {
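
hpsa_monitor_offline_device() above uses a common locking pattern: search the list under the lock, drop the lock to allocate (the driver uses GFP_KERNEL, which may sleep), then re-take it to insert. A compact userspace sketch of the same pattern, with a pthread mutex and a hand-rolled list in place of the spinlock and list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { unsigned char addr[8]; struct entry *next; };

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void monitor_offline(const unsigned char addr[8])
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = head; e; e = e->next)
		if (!memcmp(e->addr, addr, 8)) {	/* already tracked */
			pthread_mutex_unlock(&list_lock);
			return;
		}
	pthread_mutex_unlock(&list_lock);

	/* Allocate outside the lock, as the driver does with GFP_KERNEL. */
	e = malloc(sizeof(*e));
	if (!e)
		return;
	memcpy(e->addr, addr, 8);
	pthread_mutex_lock(&list_lock);
	e->next = head;
	head = e;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	unsigned char a[8] = { 1 };

	monitor_offline(a);
	monitor_offline(a);	/* second call finds the entry and bails */
	printf("entries: %d\n", head && !head->next ? 1 : 0);
	return 0;
}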
@@ -1004,6 +1312,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	for (i = 0; i < nsds; i++) {
 		if (!sd[i]) /* if already added above. */
 			continue;
+
+		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+		 * as the SCSI mid-layer does not handle such devices well.
+		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
+		 * at 160Hz, and prevents the system from coming up.
+		 */
+		if (sd[i]->volume_offline) {
+			hpsa_show_volume_status(h, sd[i]);
+			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+				h->scsi_host->host_no,
+				sd[i]->bus, sd[i]->target, sd[i]->lun);
+			continue;
+		}
+
 		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
 					h->ndevices, &entry);
 		if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	}
 	spin_unlock_irqrestore(&h->devlock, flags);
 
+	/* Monitor devices which are in one of several NOT READY states to be
+	 * brought online later. This must be done without holding h->devlock,
+	 * so don't touch h->dev[]
+	 */
+	for (i = 0; i < nsds; i++) {
+		if (!sd[i]) /* if already added above. */
+			continue;
+		if (sd[i]->volume_offline)
+			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+	}
+
 	/* Don't notify scsi mid layer of any changes the first time through
 	 * (or if there are no changes) scsi_scan_host will do it later the
 	 * first time through.
@@ -1187,11 +1520,163 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1187 1520 	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1188 1521 }
1189 1522
1523
1524/* Decode the various types of errors on ioaccel2 path.
1525 * Return 1 for any error that should generate a RAID path retry.
1526 * Return 0 for errors that don't require a RAID path retry.
1527 */
1528static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1529 struct CommandList *c,
1530 struct scsi_cmnd *cmd,
1531 struct io_accel2_cmd *c2)
1532{
1533 int data_len;
1534 int retry = 0;
1535
1536 switch (c2->error_data.serv_response) {
1537 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1538 switch (c2->error_data.status) {
1539 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1540 break;
1541 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1542 dev_warn(&h->pdev->dev,
1543 "%s: task complete with check condition.\n",
1544 "HP SSD Smart Path");
1545 if (c2->error_data.data_present !=
1546 IOACCEL2_SENSE_DATA_PRESENT)
1547 break;
1548 /* copy the sense data */
1549 data_len = c2->error_data.sense_data_len;
1550 if (data_len > SCSI_SENSE_BUFFERSIZE)
1551 data_len = SCSI_SENSE_BUFFERSIZE;
1552 if (data_len > sizeof(c2->error_data.sense_data_buff))
1553 data_len =
1554 sizeof(c2->error_data.sense_data_buff);
1555 memcpy(cmd->sense_buffer,
1556 c2->error_data.sense_data_buff, data_len);
1557 cmd->result |= SAM_STAT_CHECK_CONDITION;
1558 retry = 1;
1559 break;
1560 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1561 dev_warn(&h->pdev->dev,
1562 "%s: task complete with BUSY status.\n",
1563 "HP SSD Smart Path");
1564 retry = 1;
1565 break;
1566 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1567 dev_warn(&h->pdev->dev,
1568 "%s: task complete with reservation conflict.\n",
1569 "HP SSD Smart Path");
1570 retry = 1;
1571 break;
1572 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1573 /* Make scsi midlayer do unlimited retries */
1574 cmd->result = DID_IMM_RETRY << 16;
1575 break;
1576 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1577 dev_warn(&h->pdev->dev,
1578 "%s: task complete with aborted status.\n",
1579 "HP SSD Smart Path");
1580 retry = 1;
1581 break;
1582 default:
1583 dev_warn(&h->pdev->dev,
1584 "%s: task complete with unrecognized status: 0x%02x\n",
1585 "HP SSD Smart Path", c2->error_data.status);
1586 retry = 1;
1587 break;
1588 }
1589 break;
1590 case IOACCEL2_SERV_RESPONSE_FAILURE:
1591 /* don't expect to get here. */
1592 dev_warn(&h->pdev->dev,
1593 "unexpected delivery or target failure, status = 0x%02x\n",
1594 c2->error_data.status);
1595 retry = 1;
1596 break;
1597 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1598 break;
1599 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1600 break;
1601 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1602 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1603 retry = 1;
1604 break;
1605 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1606 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1607 break;
1608 default:
1609 dev_warn(&h->pdev->dev,
1610 "%s: Unrecognized server response: 0x%02x\n",
1611 "HP SSD Smart Path",
1612 c2->error_data.serv_response);
1613 retry = 1;
1614 break;
1615 }
1616
1617 return retry; /* retry on raid path? */
1618}
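
The check-condition branch above clamps the claimed sense length to both the destination buffer and the source buffer before copying. A standalone sketch of that double bound (the buffer sizes here are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

#define DST_SZ 96	/* stands in for SCSI_SENSE_BUFFERSIZE */
#define SRC_SZ 32	/* stands in for sizeof(sense_data_buff) */

int main(void)
{
	unsigned char dst[DST_SZ], src[SRC_SZ];
	int len = 200;			/* length claimed by the device */

	memset(src, 0xaa, sizeof(src));
	if (len > DST_SZ)
		len = DST_SZ;		/* never overrun the destination */
	if (len > SRC_SZ)
		len = SRC_SZ;		/* never over-read the source */
	memcpy(dst, src, len);
	printf("copied %d bytes\n", len);	/* 32 */
	return 0;
}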
1619
1620static void process_ioaccel2_completion(struct ctlr_info *h,
1621 struct CommandList *c, struct scsi_cmnd *cmd,
1622 struct hpsa_scsi_dev_t *dev)
1623{
1624 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1625 int raid_retry = 0;
1626
1627 /* check for good status */
1628 if (likely(c2->error_data.serv_response == 0 &&
1629 c2->error_data.status == 0)) {
1630 cmd_free(h, c);
1631 cmd->scsi_done(cmd);
1632 return;
1633 }
1634
1635 /* Any RAID offload error results in retry which will use
1636 * the normal I/O path so the controller can handle whatever's
1637 * wrong.
1638 */
1639 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1640 c2->error_data.serv_response ==
1641 IOACCEL2_SERV_RESPONSE_FAILURE) {
1642 if (c2->error_data.status ==
1643 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1644 dev_warn(&h->pdev->dev,
1645 "%s: Path is unavailable, retrying on standard path.\n",
1646 "HP SSD Smart Path");
1647 else
1648 dev_warn(&h->pdev->dev,
1649 "%s: Error 0x%02x, retrying on standard path.\n",
1650 "HP SSD Smart Path", c2->error_data.status);
1651
1652 dev->offload_enabled = 0;
1653 h->drv_req_rescan = 1; /* schedule controller for a rescan */
1654 cmd->result = DID_SOFT_ERROR << 16;
1655 cmd_free(h, c);
1656 cmd->scsi_done(cmd);
1657 return;
1658 }
1659 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1660 /* If error found, disable Smart Path, schedule a rescan,
1661 * and force a retry on the standard path.
1662 */
1663 if (raid_retry) {
1664 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1665 "HP SSD Smart Path");
1666 dev->offload_enabled = 0; /* Disable Smart Path */
1667 h->drv_req_rescan = 1; /* schedule controller rescan */
1668 cmd->result = DID_SOFT_ERROR << 16;
1669 }
1670 cmd_free(h, c);
1671 cmd->scsi_done(cmd);
1672}
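
The cmd->result values assigned on these paths pack a status byte, a message byte, and a host byte into one word. The constants below mirror the kernel's <scsi/scsi.h> values of this era; DID_SOFT_ERROR (0x0b) in the host byte is what makes the midlayer retry on the standard RAID path:

#include <stdio.h>

#define COMMAND_COMPLETE	 0x00	/* msg byte */
#define DID_OK			 0x00	/* host byte */
#define DID_SOFT_ERROR		 0x0b	/* host byte: please retry */
#define SAM_STAT_CHECK_CONDITION 0x02	/* status byte */

static unsigned int make_result(unsigned char host, unsigned char msg,
				unsigned char status)
{
	return (host << 16) | (msg << 8) | status;
}

int main(void)
{
	/* the good-path result built in complete_scsi_command() */
	unsigned int ok = make_result(DID_OK, COMMAND_COMPLETE, 0);
	/* the ioaccel fallback result that forces a RAID-path retry */
	unsigned int retry = make_result(DID_SOFT_ERROR, 0, 0);

	printf("ok=0x%06x retry=0x%06x\n", ok, retry);
	return !(ok == 0 && retry == 0x0b0000);
}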
1673
1190 1674 static void complete_scsi_command(struct CommandList *cp)
1191 1675 {
1192 1676 	struct scsi_cmnd *cmd;
1193 1677 	struct ctlr_info *h;
1194 1678 	struct ErrorInfo *ei;
1679 struct hpsa_scsi_dev_t *dev;
1195 1680
1196 1681 	unsigned char sense_key;
1197 1682 	unsigned char asc;	/* additional sense code */
@@ -1201,13 +1686,19 @@ static void complete_scsi_command(struct CommandList *cp)
1201 1686 	ei = cp->err_info;
1202 1687 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1203 1688 	h = cp->h;
1689 dev = cmd->device->hostdata;
1204 1690
1205 1691 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
1206	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
1692	if ((cp->cmd_type == CMD_SCSI) &&
1693		(cp->Header.SGTotal > h->max_cmd_sg_entries))
1207 1694 		hpsa_unmap_sg_chain_block(h, cp);
1208 1695
1209 1696 	cmd->result = (DID_OK << 16); 		/* host byte */
1210 1697 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
1698
1699 if (cp->cmd_type == CMD_IOACCEL2)
1700 return process_ioaccel2_completion(h, cp, cmd, dev);
1701
1211 1702 	cmd->result |= ei->ScsiStatus;
1212 1703 
1213 1704 	/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@ static void complete_scsi_command(struct CommandList *cp)
1227 1718 		return;
1228 1719 	}
1229 1720
1721 /* For I/O accelerator commands, copy over some fields to the normal
1722 * CISS header used below for error handling.
1723 */
1724 if (cp->cmd_type == CMD_IOACCEL1) {
1725 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1726 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1727 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1728 cp->Header.Tag.lower = c->Tag.lower;
1729 cp->Header.Tag.upper = c->Tag.upper;
1730 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1731 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1732
1733 /* Any RAID offload error results in retry which will use
1734 * the normal I/O path so the controller can handle whatever's
1735 * wrong.
1736 */
1737 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1738 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1739 dev->offload_enabled = 0;
1740 cmd->result = DID_SOFT_ERROR << 16;
1741 cmd_free(h, cp);
1742 cmd->scsi_done(cmd);
1743 return;
1744 }
1745 }
1746
1230 1747 	/* an error has occurred */
1231 1748 	switch (ei->CommandStatus) {
1232 1749
@@ -1389,6 +1906,14 @@ static void complete_scsi_command(struct CommandList *cp)
1389 1906 		cmd->result = DID_ERROR << 16;
1390 1907 		dev_warn(&h->pdev->dev, "Command unabortable\n");
1391 1908 		break;
1909 case CMD_IOACCEL_DISABLED:
1910 /* This only handles the direct pass-through case since RAID
1911 * offload is handled above. Just attempt a retry.
1912 */
1913 cmd->result = DID_SOFT_ERROR << 16;
1914 dev_warn(&h->pdev->dev,
1915 "cp %p had HP SSD Smart Path error\n", cp);
1916 break;
1392 1917 	default:
1393 1918 		cmd->result = DID_ERROR << 16;
1394 1919 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@ static int hpsa_map_one(struct pci_dev *pdev,
1438 1963 	cp->SG[0].Addr.upper =
1439 1964 		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1440 1965 	cp->SG[0].Len = buflen;
1966 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1441 1967 	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
1442 1968 	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1443 1969 	return 0;
@@ -1490,17 +2016,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1490 2016 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1491 2017 }
1492 2018
1493static void hpsa_scsi_interpret_error(struct CommandList *cp)
1494{
1495	struct ErrorInfo *ei;
2019static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2020		struct CommandList *c)
2021{
2022	const u8 *cdb = c->Request.CDB;
2023 const u8 *lun = c->Header.LUN.LunAddrBytes;
2024
2025 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2026 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2027 txt, lun[0], lun[1], lun[2], lun[3],
2028 lun[4], lun[5], lun[6], lun[7],
2029 cdb[0], cdb[1], cdb[2], cdb[3],
2030 cdb[4], cdb[5], cdb[6], cdb[7],
2031 cdb[8], cdb[9], cdb[10], cdb[11],
2032 cdb[12], cdb[13], cdb[14], cdb[15]);
2033}
2034
2035static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2036 struct CommandList *cp)
2037{
2038 const struct ErrorInfo *ei = cp->err_info;
1496 2039 	struct device *d = &cp->h->pdev->dev;
2040	const u8 *sd = ei->SenseInfo;
1497 2041 
1498	ei = cp->err_info;
1499 2042 	switch (ei->CommandStatus) {
1500 2043 	case CMD_TARGET_STATUS:
1501		dev_warn(d, "cmd %p has completed with errors\n", cp);
1502		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1503			ei->ScsiStatus);
2044		hpsa_print_cmd(h, "SCSI status", cp);
2045		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2046			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2047				sd[2] & 0x0f, sd[12], sd[13]);
2048		else
2049			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
1504 2050 		if (ei->ScsiStatus == 0)
1505 2051 			dev_warn(d, "SCSI status is abnormally zero. "
1506 2052 			"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
1508 "firmware bug, circa July, 2001.)\n"); 2054 "firmware bug, circa July, 2001.)\n");
1509 break; 2055 break;
1510 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2056 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1511 dev_info(d, "UNDERRUN\n");
1512 break; 2057 break;
1513 case CMD_DATA_OVERRUN: 2058 case CMD_DATA_OVERRUN:
1514 dev_warn(d, "cp %p has completed with data overrun\n", cp); 2059 hpsa_print_cmd(h, "overrun condition", cp);
1515 break; 2060 break;
1516 case CMD_INVALID: { 2061 case CMD_INVALID: {
1517 /* controller unfortunately reports SCSI passthru's 2062 /* controller unfortunately reports SCSI passthru's
1518 * to non-existent targets as invalid commands. 2063 * to non-existent targets as invalid commands.
1519 */ 2064 */
1520 dev_warn(d, "cp %p is reported invalid (probably means " 2065 hpsa_print_cmd(h, "invalid command", cp);
1521 "target device no longer present)\n", cp); 2066 dev_warn(d, "probably means device no longer present\n");
1522 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1523 print_cmd(cp); */
1524 } 2067 }
1525 break; 2068 break;
1526 case CMD_PROTOCOL_ERR: 2069 case CMD_PROTOCOL_ERR:
1527 dev_warn(d, "cp %p has protocol error \n", cp); 2070 hpsa_print_cmd(h, "protocol error", cp);
1528 break; 2071 break;
1529 case CMD_HARDWARE_ERR: 2072 case CMD_HARDWARE_ERR:
1530 /* cmd->result = DID_ERROR << 16; */ 2073 hpsa_print_cmd(h, "hardware error", cp);
1531 dev_warn(d, "cp %p had hardware error\n", cp);
1532 break; 2074 break;
1533 case CMD_CONNECTION_LOST: 2075 case CMD_CONNECTION_LOST:
1534 dev_warn(d, "cp %p had connection lost\n", cp); 2076 hpsa_print_cmd(h, "connection lost", cp);
1535 break; 2077 break;
1536 case CMD_ABORTED: 2078 case CMD_ABORTED:
1537 dev_warn(d, "cp %p was aborted\n", cp); 2079 hpsa_print_cmd(h, "aborted", cp);
1538 break; 2080 break;
1539 case CMD_ABORT_FAILED: 2081 case CMD_ABORT_FAILED:
1540 dev_warn(d, "cp %p reports abort failed\n", cp); 2082 hpsa_print_cmd(h, "abort failed", cp);
1541 break; 2083 break;
1542 case CMD_UNSOLICITED_ABORT: 2084 case CMD_UNSOLICITED_ABORT:
1543 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); 2085 hpsa_print_cmd(h, "unsolicited abort", cp);
1544 break; 2086 break;
1545 case CMD_TIMEOUT: 2087 case CMD_TIMEOUT:
1546 dev_warn(d, "cp %p timed out\n", cp); 2088 hpsa_print_cmd(h, "timed out", cp);
1547 break; 2089 break;
1548 case CMD_UNABORTABLE: 2090 case CMD_UNABORTABLE:
1549 dev_warn(d, "Command unabortable\n"); 2091 hpsa_print_cmd(h, "unabortable", cp);
1550 break; 2092 break;
1551 default: 2093 default:
1552 dev_warn(d, "cp %p returned unknown status %x\n", cp, 2094 hpsa_print_cmd(h, "unknown status", cp);
2095 dev_warn(d, "Unknown command status %x\n",
1553 ei->CommandStatus); 2096 ei->CommandStatus);
1554 } 2097 }
1555} 2098}
1556 2099
1557 2100 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1558		unsigned char page, unsigned char *buf,
2101		u16 page, unsigned char *buf,
1559 2102 		unsigned char bufsize)
1560 2103 {
1561 2104 	int rc = IO_OK;
@@ -1577,7 +2120,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1577 2120 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1578 2121 	ei = c->err_info;
1579 2122 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1580		hpsa_scsi_interpret_error(c);
2123		hpsa_scsi_interpret_error(h, c);
1581 2124 		rc = -1;
1582 2125 	}
1583 2126 out:
@@ -1585,7 +2128,39 @@ out:
1585 2128 	return rc;
1586 2129 }
1587 2130 
1588static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
2131static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2132 unsigned char *scsi3addr, unsigned char page,
2133 struct bmic_controller_parameters *buf, size_t bufsize)
2134{
2135 int rc = IO_OK;
2136 struct CommandList *c;
2137 struct ErrorInfo *ei;
2138
2139 c = cmd_special_alloc(h);
2140
2141 if (c == NULL) { /* trouble... */
2142 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2143 return -ENOMEM;
2144 }
2145
2146 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2147 page, scsi3addr, TYPE_CMD)) {
2148 rc = -1;
2149 goto out;
2150 }
2151 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2152 ei = c->err_info;
2153 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2154 hpsa_scsi_interpret_error(h, c);
2155 rc = -1;
2156 }
2157out:
2158 cmd_special_free(h, c);
2159 return rc;
2160}
2161
2162static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2163 u8 reset_type)
1589 2164 {
1590 2165 	int rc = IO_OK;
1591 2166 	struct CommandList *c;
@@ -1599,14 +2174,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1599 2174 	}
1600 2175 
1601 2176 	/* fill_cmd can't fail here, no data buffer to map. */
1602	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
1603		NULL, 0, 0, scsi3addr, TYPE_MSG);
2177	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2178		scsi3addr, TYPE_MSG);
2179	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
1604 2180 	hpsa_scsi_do_simple_cmd_core(h, c);
1605 2181 	/* no unmap needed here because no data xfer. */
1606 2182 
1607 2183 	ei = c->err_info;
1608 2184 	if (ei->CommandStatus != 0) {
1609		hpsa_scsi_interpret_error(c);
2185		hpsa_scsi_interpret_error(h, c);
1610 2186 		rc = -1;
1611 2187 	}
1612 2188 	cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
1623 2199 	buf = kzalloc(64, GFP_KERNEL);
1624 2200 	if (!buf)
1625 2201 		return;
1626	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
2202	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
1627 2203 	if (rc == 0)
1628 2204 		*raid_level = buf[8];
1629 2205 	if (*raid_level > RAID_UNKNOWN)
@@ -1632,6 +2208,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
1632 2208 	return;
1633 2209 }
1634 2210
2211#define HPSA_MAP_DEBUG
2212#ifdef HPSA_MAP_DEBUG
2213static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2214 struct raid_map_data *map_buff)
2215{
2216 struct raid_map_disk_data *dd = &map_buff->data[0];
2217 int map, row, col;
2218 u16 map_cnt, row_cnt, disks_per_row;
2219
2220 if (rc != 0)
2221 return;
2222
2223 /* Show details only if debugging has been activated. */
2224 if (h->raid_offload_debug < 2)
2225 return;
2226
2227 dev_info(&h->pdev->dev, "structure_size = %u\n",
2228 le32_to_cpu(map_buff->structure_size));
2229 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2230 le32_to_cpu(map_buff->volume_blk_size));
2231 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2232 le64_to_cpu(map_buff->volume_blk_cnt));
2233 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2234 map_buff->phys_blk_shift);
2235 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2236 map_buff->parity_rotation_shift);
2237 dev_info(&h->pdev->dev, "strip_size = %u\n",
2238 le16_to_cpu(map_buff->strip_size));
2239 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2240 le64_to_cpu(map_buff->disk_starting_blk));
2241 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2242 le64_to_cpu(map_buff->disk_blk_cnt));
2243 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2244 le16_to_cpu(map_buff->data_disks_per_row));
2245 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2246 le16_to_cpu(map_buff->metadata_disks_per_row));
2247 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2248 le16_to_cpu(map_buff->row_cnt));
2249 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2250 le16_to_cpu(map_buff->layout_map_count));
2251 dev_info(&h->pdev->dev, "flags = %u\n",
2252 le16_to_cpu(map_buff->flags));
2253 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2254		dev_info(&h->pdev->dev, "encryption = ON\n");
2255	else
2256		dev_info(&h->pdev->dev, "encryption = OFF\n");
2257 dev_info(&h->pdev->dev, "dekindex = %u\n",
2258 le16_to_cpu(map_buff->dekindex));
2259
2260 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2261 for (map = 0; map < map_cnt; map++) {
2262 dev_info(&h->pdev->dev, "Map%u:\n", map);
2263 row_cnt = le16_to_cpu(map_buff->row_cnt);
2264 for (row = 0; row < row_cnt; row++) {
2265 dev_info(&h->pdev->dev, " Row%u:\n", row);
2266 disks_per_row =
2267 le16_to_cpu(map_buff->data_disks_per_row);
2268 for (col = 0; col < disks_per_row; col++, dd++)
2269 dev_info(&h->pdev->dev,
2270 " D%02u: h=0x%04x xor=%u,%u\n",
2271 col, dd->ioaccel_handle,
2272 dd->xor_mult[0], dd->xor_mult[1]);
2273 disks_per_row =
2274 le16_to_cpu(map_buff->metadata_disks_per_row);
2275 for (col = 0; col < disks_per_row; col++, dd++)
2276 dev_info(&h->pdev->dev,
2277 " M%02u: h=0x%04x xor=%u,%u\n",
2278 col, dd->ioaccel_handle,
2279 dd->xor_mult[0], dd->xor_mult[1]);
2280 }
2281 }
2282}
2283#else
2284static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2285 __attribute__((unused)) int rc,
2286 __attribute__((unused)) struct raid_map_data *map_buff)
2287{
2288}
2289#endif
2290
2291static int hpsa_get_raid_map(struct ctlr_info *h,
2292 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2293{
2294 int rc = 0;
2295 struct CommandList *c;
2296 struct ErrorInfo *ei;
2297
2298 c = cmd_special_alloc(h);
2299 if (c == NULL) {
2300 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2301 return -ENOMEM;
2302 }
2303 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2304 sizeof(this_device->raid_map), 0,
2305 scsi3addr, TYPE_CMD)) {
2306 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2307 cmd_special_free(h, c);
2308 return -ENOMEM;
2309 }
2310 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2311 ei = c->err_info;
2312 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2313 hpsa_scsi_interpret_error(h, c);
2314 cmd_special_free(h, c);
2315 return -1;
2316 }
2317 cmd_special_free(h, c);
2318
2319 /* @todo in the future, dynamically allocate RAID map memory */
2320 if (le32_to_cpu(this_device->raid_map.structure_size) >
2321 sizeof(this_device->raid_map)) {
2322 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2323 rc = -1;
2324 }
2325 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2326 return rc;
2327}
2328
2329static int hpsa_vpd_page_supported(struct ctlr_info *h,
2330 unsigned char scsi3addr[], u8 page)
2331{
2332 int rc;
2333 int i;
2334 int pages;
2335 unsigned char *buf, bufsize;
2336
2337 buf = kzalloc(256, GFP_KERNEL);
2338 if (!buf)
2339 return 0;
2340
2341 /* Get the size of the page list first */
2342 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2343 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2344 buf, HPSA_VPD_HEADER_SZ);
2345 if (rc != 0)
2346 goto exit_unsupported;
2347 pages = buf[3];
2348 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2349 bufsize = pages + HPSA_VPD_HEADER_SZ;
2350 else
2351 bufsize = 255;
2352
2353 /* Get the whole VPD page list */
2354 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2355 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2356 buf, bufsize);
2357 if (rc != 0)
2358 goto exit_unsupported;
2359
2360 pages = buf[3];
2361 for (i = 1; i <= pages; i++)
2362 if (buf[3 + i] == page)
2363 goto exit_supported;
2364exit_unsupported:
2365 kfree(buf);
2366 return 0;
2367exit_supported:
2368 kfree(buf);
2369 return 1;
2370}
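
hpsa_vpd_page_supported() walks the standard SPC "Supported VPD Pages" page: byte 1 carries the page code (0x00), byte 3 the list length (the driver assumes it fits in one byte), and the supported page codes start at byte 4. A self-contained run of the same walk over a canned response:

#include <stdio.h>

static int vpd_page_supported(const unsigned char *buf, unsigned char page)
{
	int i, pages = buf[3];

	for (i = 1; i <= pages; i++)	/* same loop shape as above */
		if (buf[3 + i] == page)
			return 1;
	return 0;
}

int main(void)
{
	/* fabricated response listing pages 0x00, 0x80 and 0x83 */
	unsigned char resp[] = { 0x00, 0x00, 0x00, 0x03, 0x00, 0x80, 0x83 };

	printf("0x83 supported: %d\n", vpd_page_supported(resp, 0x83)); /* 1 */
	printf("0xc1 supported: %d\n", vpd_page_supported(resp, 0xc1)); /* 0 */
	return 0;
}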
2371
2372static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2373 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2374{
2375 int rc;
2376 unsigned char *buf;
2377 u8 ioaccel_status;
2378
2379 this_device->offload_config = 0;
2380 this_device->offload_enabled = 0;
2381
2382 buf = kzalloc(64, GFP_KERNEL);
2383 if (!buf)
2384 return;
2385 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2386 goto out;
2387 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2388 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2389 if (rc != 0)
2390 goto out;
2391
2392#define IOACCEL_STATUS_BYTE 4
2393#define OFFLOAD_CONFIGURED_BIT 0x01
2394#define OFFLOAD_ENABLED_BIT 0x02
2395 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2396 this_device->offload_config =
2397 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2398 if (this_device->offload_config) {
2399 this_device->offload_enabled =
2400 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2401 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2402 this_device->offload_enabled = 0;
2403 }
2404out:
2405 kfree(buf);
2406 return;
2407}
2408
1635 2409 /* Get the device id from inquiry page 0x83 */
1636 2410 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1637 2411 	unsigned char *device_id, int buflen)
@@ -1644,7 +2418,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1644 2418 	buf = kzalloc(64, GFP_KERNEL);
1645 2419 	if (!buf)
1646 2420 		return -1;
1647	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
2421	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
1648 2422 	if (rc == 0)
1649 2423 		memcpy(device_id, &buf[8], buflen);
1650 2424 	kfree(buf);
@@ -1678,8 +2452,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1678 2452 	ei = c->err_info;
1679 2453 	if (ei->CommandStatus != 0 &&
1680 2454 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
1681		hpsa_scsi_interpret_error(c);
2455		hpsa_scsi_interpret_error(h, c);
1682 2456 		rc = -1;
2457 } else {
2458 if (buf->extended_response_flag != extended_response) {
2459 dev_err(&h->pdev->dev,
2460 "report luns requested format %u, got %u\n",
2461 extended_response,
2462 buf->extended_response_flag);
2463 rc = -1;
2464 }
1683 2465 	}
1684 2466 out:
1685 2467 	cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1707 2489 	device->lun = lun;
1708 2490 }
1709 2491
2492/* Use VPD inquiry to get details of volume status */
2493static int hpsa_get_volume_status(struct ctlr_info *h,
2494 unsigned char scsi3addr[])
2495{
2496 int rc;
2497 int status;
2498 int size;
2499 unsigned char *buf;
2500
2501 buf = kzalloc(64, GFP_KERNEL);
2502 if (!buf)
2503 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2504
2505 /* Does controller have VPD for logical volume status? */
2506 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
2507 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
2508 goto exit_failed;
2509 }
2510
2511 /* Get the size of the VPD return buffer */
2512 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2513 buf, HPSA_VPD_HEADER_SZ);
2514 if (rc != 0) {
2515 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2516 goto exit_failed;
2517 }
2518 size = buf[3];
2519
2520 /* Now get the whole VPD buffer */
2521 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2522 buf, size + HPSA_VPD_HEADER_SZ);
2523 if (rc != 0) {
2524 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2525 goto exit_failed;
2526 }
2527 status = buf[4]; /* status byte */
2528
2529 kfree(buf);
2530 return status;
2531exit_failed:
2532 kfree(buf);
2533 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2534}
2535
2536/* Determine offline status of a volume.
2537 * Return either:
2538 * 0 (not offline)
2539 * 0 (also returned when the not-ready reason cannot be determined)
2540 * # (integer code indicating one of several NOT READY states
2541 * describing why a volume is to be kept offline)
2542 */
2543static unsigned char hpsa_volume_offline(struct ctlr_info *h,
2544 unsigned char scsi3addr[])
2545{
2546 struct CommandList *c;
2547 unsigned char *sense, sense_key, asc, ascq;
2548 int ldstat = 0;
2549 u16 cmd_status;
2550 u8 scsi_status;
2551#define ASC_LUN_NOT_READY 0x04
2552#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2553#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2554
2555 c = cmd_alloc(h);
2556 if (!c)
2557 return 0;
2558 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2559 hpsa_scsi_do_simple_cmd_core(h, c);
2560 sense = c->err_info->SenseInfo;
2561 sense_key = sense[2];
2562 asc = sense[12];
2563 ascq = sense[13];
2564 cmd_status = c->err_info->CommandStatus;
2565 scsi_status = c->err_info->ScsiStatus;
2566 cmd_free(h, c);
2567 /* Is the volume 'not ready'? */
2568 if (cmd_status != CMD_TARGET_STATUS ||
2569 scsi_status != SAM_STAT_CHECK_CONDITION ||
2570 sense_key != NOT_READY ||
2571 asc != ASC_LUN_NOT_READY) {
2572 return 0;
2573 }
2574
2575 /* Determine the reason for not ready state */
2576 ldstat = hpsa_get_volume_status(h, scsi3addr);
2577
2578 /* Keep volume offline in certain cases: */
2579 switch (ldstat) {
2580 case HPSA_LV_UNDERGOING_ERASE:
2581 case HPSA_LV_UNDERGOING_RPI:
2582 case HPSA_LV_PENDING_RPI:
2583 case HPSA_LV_ENCRYPTED_NO_KEY:
2584 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2585 case HPSA_LV_UNDERGOING_ENCRYPTION:
2586 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2587 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2588 return ldstat;
2589 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2590 /* If VPD status page isn't available,
2591 * use ASC/ASCQ to determine state
2592 */
2593 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2594 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2595 return ldstat;
2596 break;
2597 default:
2598 break;
2599 }
2600 return 0;
2601}
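
The TUR path above reads fixed-format sense data: the sense key is the low nibble of byte 2, the ASC is byte 12 and the ASCQ is byte 13. A standalone decode of the 02/04/04 case (NOT READY, LUN not ready, format in progress) that the fallback branch watches for:

#include <stdio.h>

#define NOT_READY 0x02

int main(void)
{
	/* fabricated fixed-format sense buffer: key 02, ASC 04, ASCQ 04 */
	unsigned char sense[18] = { 0x70, 0, NOT_READY, 0, 0, 0, 0, 10,
				    0, 0, 0, 0, 0x04, 0x04, 0, 0, 0, 0 };
	unsigned char key = sense[2] & 0x0f;
	unsigned char asc = sense[12], ascq = sense[13];

	printf("key=%02x asc=%02x ascq=%02x\n", key, asc, ascq);
	return 0;
}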
2602
1710 2603 static int hpsa_update_device_info(struct ctlr_info *h,
1711 2604 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
1712 2605 	unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1745 2638 		sizeof(this_device->device_id));
1746 2639 
1747 2640 	if (this_device->devtype == TYPE_DISK &&
1748		is_logical_dev_addr_mode(scsi3addr))
1749		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1750	else
1751		this_device->raid_level = RAID_UNKNOWN;
2641		is_logical_dev_addr_mode(scsi3addr)) {
2642		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2643		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2644			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2645		this_device->volume_offline =
2646			hpsa_volume_offline(h, scsi3addr);
2647	} else {
2648		this_device->raid_level = RAID_UNKNOWN;
2649		this_device->offload_config = 0;
2650		this_device->offload_enabled = 0;
2651		this_device->volume_offline = 0;
2652	}
1752 2653 
1753 2654 	if (is_OBDR_device) {
1754 2655 		/* See if this is a One-Button-Disaster-Recovery device
@@ -1878,6 +2779,105 @@ static int add_ext_target_dev(struct ctlr_info *h,
1878 2779 }
1879 2780 
1880 2781 /*
2782 * Get address of physical disk used for an ioaccel2 mode command:
2783 * 1. Extract ioaccel2 handle from the command.
2784 * 2. Find a matching ioaccel2 handle from list of physical disks.
2785 * 3. Return:
2786 * 1 and set scsi3addr to address of matching physical disk, or
2787 * 0 if no matching physical disk was found.
2788 */
2789static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2790 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2791{
2792 struct ReportExtendedLUNdata *physicals = NULL;
2793 int responsesize = 24; /* size of physical extended response */
2794 int extended = 2; /* flag forces reporting 'other dev info'. */
2795 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2796 u32 nphysicals = 0; /* number of reported physical devs */
2797 int found = 0; /* found match (1) or not (0) */
2798 u32 find; /* handle we need to match */
2799 int i;
2800 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2801 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2802 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2803 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2804 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2805
2806 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2807 return 0; /* no match */
2808
2809 /* point to the ioaccel2 device handle */
2810 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2811 if (c2a == NULL)
2812 return 0; /* no match */
2813
2814 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2815 if (scmd == NULL)
2816 return 0; /* no match */
2817
2818 d = scmd->device->hostdata;
2819 if (d == NULL)
2820 return 0; /* no match */
2821
2822 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2823 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2824 find = c2a->scsi_nexus;
2825
2826 if (h->raid_offload_debug > 0)
2827 dev_info(&h->pdev->dev,
2828 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2829 __func__, scsi_nexus,
2830 d->device_id[0], d->device_id[1], d->device_id[2],
2831 d->device_id[3], d->device_id[4], d->device_id[5],
2832 d->device_id[6], d->device_id[7], d->device_id[8],
2833 d->device_id[9], d->device_id[10], d->device_id[11],
2834 d->device_id[12], d->device_id[13], d->device_id[14],
2835 d->device_id[15]);
2836
2837 /* Get the list of physical devices */
2838	physicals = kzalloc(reportsize, GFP_KERNEL);
	if (!physicals)
		return 0;	/* treat allocation failure as no match */
2839 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2840 reportsize, extended)) {
2841 dev_err(&h->pdev->dev,
2842 "Can't lookup %s device handle: report physical LUNs failed.\n",
2843 "HP SSD Smart Path");
2844 kfree(physicals);
2845 return 0;
2846 }
2847 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2848 responsesize;
2849
2850
2851 /* find ioaccel2 handle in list of physicals: */
2852 for (i = 0; i < nphysicals; i++) {
2853 /* handle is in bytes 28-31 of each lun */
2854 if (memcmp(&((struct ReportExtendedLUNdata *)
2855 physicals)->LUN[i][20], &find, 4) != 0) {
2856 continue; /* didn't match */
2857 }
2858 found = 1;
2859 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
2860 physicals)->LUN[i][0], 8);
2861 if (h->raid_offload_debug > 0)
2862 dev_info(&h->pdev->dev,
2863 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2864 __func__, find,
2865 ((struct ReportExtendedLUNdata *)
2866 physicals)->LUN[i][20],
2867 scsi3addr[0], scsi3addr[1], scsi3addr[2],
2868 scsi3addr[3], scsi3addr[4], scsi3addr[5],
2869 scsi3addr[6], scsi3addr[7]);
2870 break; /* found it */
2871 }
2872
2873 kfree(physicals);
2874 if (found)
2875 return 1;
2876 else
2877 return 0;
2878
2879}
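
The scan above is a plain memcmp() of the 4-byte handle against each 24-byte extended-report entry: the 8-byte address leads the entry and the handle the code compares sits at offset 20. A userspace sketch over a fabricated two-entry list (offsets follow the code above; the handle values are made up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ENTRY 24	/* bytes per extended-report entry */

int main(void)
{
	unsigned char list[2 * ENTRY] = { 0 };
	uint32_t find = 0x11223344, other = 0x55667788;
	unsigned char addr[8];
	int i;

	memcpy(&list[0 * ENTRY + 20], &other, 4);
	memcpy(&list[1 * ENTRY + 20], &find, 4);
	list[1 * ENTRY] = 0x01;			/* fake scsi3addr byte */

	for (i = 0; i < 2; i++) {
		if (memcmp(&list[i * ENTRY + 20], &find, 4) != 0)
			continue;		/* didn't match */
		memcpy(addr, &list[i * ENTRY], 8);
		printf("match at entry %d, addr[0]=0x%02x\n", i, addr[0]);
		break;
	}
	return 0;
}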
2880/*
1881 2881  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
1882 2882  * logdev.  The number of luns in physdev and logdev are returned in
1883 2883  * *nphysicals and *nlogicals, respectively.
@@ -1885,14 +2885,26 @@ static int add_ext_target_dev(struct ctlr_info *h,
1885 2885  */
1886 2886 static int hpsa_gather_lun_info(struct ctlr_info *h,
1887 2887 	int reportlunsize,
1888	struct ReportLUNdata *physdev, u32 *nphysicals,
2888	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
1889 2889 	struct ReportLUNdata *logdev, u32 *nlogicals)
1890 2890 {
1891	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
2891	int physical_entry_size = 8;
2892
2893 *physical_mode = 0;
2894
2895 /* For I/O accelerator mode we need to read physical device handles */
2896 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2897 h->transMethod & CFGTBL_Trans_io_accel2) {
2898 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2899 physical_entry_size = 24;
2900 }
2901 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
2902 *physical_mode)) {
1892 2903 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1893 2904 		return -1;
1894 2905 	}
1895	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
2906	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2907		physical_entry_size;
1896 2908 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1897 2909 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1898 2910 			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
1923 2935 }
1924 2936 
1925 2937 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1926	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
2938	int nphysicals, int nlogicals,
2939	struct ReportExtendedLUNdata *physdev_list,
1927 2940 	struct ReportLUNdata *logdev_list)
1928 2941 {
1929 2942 	/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1947 2960 		return NULL;
1948 2961 }
1949 2962
2963static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2964{
2965 int rc;
2966 struct bmic_controller_parameters *ctlr_params;
2967 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2968 GFP_KERNEL);
2969
2970 if (!ctlr_params)
2971 return 0;
2972 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2973 sizeof(struct bmic_controller_parameters));
2974 if (rc != 0) {
2975 kfree(ctlr_params);
2976 return 0;
2977 }
2978 return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
2979}
2980
1950 2981 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1951 2982 {
1952 2983 	/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1959 2990 	 * tell which devices we already know about, vs. new
1960 2991 	 * devices, vs. disappearing devices.
1961 2992 	 */
1962	struct ReportLUNdata *physdev_list = NULL;
2993	struct ReportExtendedLUNdata *physdev_list = NULL;
1963 2994 	struct ReportLUNdata *logdev_list = NULL;
1964 2995 	u32 nphysicals = 0;
1965 2996 	u32 nlogicals = 0;
2997	int physical_mode = 0;
1966 2998 	u32 ndev_allocated = 0;
1967 2999 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1968 3000 	int ncurrent = 0;
1969	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
3001	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
1970 3002 	int i, n_ext_target_devs, ndevs_to_allocate;
1971 3003 	int raid_ctlr_position;
3004	u8 rescan_hba_mode;
1972 3005 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
1973 3006 
1974 3007 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1982 3015 	}
1983 3016 	memset(lunzerobits, 0, sizeof(lunzerobits));
1984 3017 
1985	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1986		logdev_list, &nlogicals))
3018	rescan_hba_mode = hpsa_hba_mode_enabled(h);
3019 
3020 if (!h->hba_mode_enabled && rescan_hba_mode)
3021 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3022 else if (h->hba_mode_enabled && !rescan_hba_mode)
3023 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3024
3025 h->hba_mode_enabled = rescan_hba_mode;
3026
3027 if (hpsa_gather_lun_info(h, reportlunsize,
3028 (struct ReportLUNdata *) physdev_list, &nphysicals,
3029 &physical_mode, logdev_list, &nlogicals))
1987 3030 		goto out;
1988 3031 
1989 3032 	/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2064 3107 			ncurrent++;
2065 3108 			break;
2066 3109 		case TYPE_DISK:
2067			if (i < nphysicals)
2068				break;
2069			ncurrent++;
3110			if (h->hba_mode_enabled) {
3111				/* never use raid mapper in HBA mode */
3112				this_device->offload_enabled = 0;
3113				ncurrent++;
3114				break;
3115			} else if (h->acciopath_status) {
3116 if (i >= nphysicals) {
3117 ncurrent++;
3118 break;
3119 }
3120 } else {
3121 if (i < nphysicals)
3122 break;
3123 ncurrent++;
3124 break;
3125 }
3126 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3127 memcpy(&this_device->ioaccel_handle,
3128 &lunaddrbytes[20],
3129 sizeof(this_device->ioaccel_handle));
3130 ncurrent++;
3131 }
2070 3132 			break;
2071 3133 		case TYPE_TAPE:
2072 3134 		case TYPE_MEDIUM_CHANGER:
@@ -2136,7 +3198,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
2136 3198 		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2137 3199 		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2138 3200 		curr_sg->Len = len;
2139		curr_sg->Ext = 0;  /* we are not chaining */
3201		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
2140 3202 		curr_sg++;
2141 3203 	}
2142 3204
@@ -2160,6 +3222,726 @@ sglist_finished:
2160 3222 	return 0;
2161 3223 }
2162 3224
3225#define IO_ACCEL_INELIGIBLE (1)
3226static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3227{
3228 int is_write = 0;
3229 u32 block;
3230 u32 block_cnt;
3231
3232 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3233 switch (cdb[0]) {
3234 case WRITE_6:
3235 case WRITE_12:
3236 is_write = 1;
3237 case READ_6:
3238 case READ_12:
3239 if (*cdb_len == 6) {
3240 block = (((u32) cdb[2]) << 8) | cdb[3];
3241 block_cnt = cdb[4];
3242 } else {
3243 BUG_ON(*cdb_len != 12);
3244 block = (((u32) cdb[2]) << 24) |
3245 (((u32) cdb[3]) << 16) |
3246 (((u32) cdb[4]) << 8) |
3247 cdb[5];
3248 block_cnt =
3249 (((u32) cdb[6]) << 24) |
3250 (((u32) cdb[7]) << 16) |
3251 (((u32) cdb[8]) << 8) |
3252 cdb[9];
3253 }
3254 if (block_cnt > 0xffff)
3255 return IO_ACCEL_INELIGIBLE;
3256
3257 cdb[0] = is_write ? WRITE_10 : READ_10;
3258 cdb[1] = 0;
3259 cdb[2] = (u8) (block >> 24);
3260 cdb[3] = (u8) (block >> 16);
3261 cdb[4] = (u8) (block >> 8);
3262 cdb[5] = (u8) (block);
3263 cdb[6] = 0;
3264 cdb[7] = (u8) (block_cnt >> 8);
3265 cdb[8] = (u8) (block_cnt);
3266 cdb[9] = 0;
3267 *cdb_len = 10;
3268 break;
3269 }
3270 return 0;
3271}
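
A worked instance of the rewrite fixup_ioaccel_cdb() performs, mirrored as written (note the helper takes the LBA of a 6-byte CDB from bytes 2-3 only): a READ(6) of 8 blocks at LBA 0x1234 becomes the READ(10) printed at the end:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cdb[16] = { 0x08, 0x00, 0x12, 0x34, 0x08, 0x00 }; /* READ(6) */
	uint32_t block = ((uint32_t)cdb[2] << 8) | cdb[3];	/* 0x1234 */
	uint32_t block_cnt = cdb[4];				/* 8 */
	int i;

	cdb[0] = 0x28;			/* READ(10) opcode */
	cdb[1] = 0;
	cdb[2] = (uint8_t)(block >> 24);
	cdb[3] = (uint8_t)(block >> 16);
	cdb[4] = (uint8_t)(block >> 8);
	cdb[5] = (uint8_t)block;
	cdb[6] = 0;
	cdb[7] = (uint8_t)(block_cnt >> 8);
	cdb[8] = (uint8_t)block_cnt;
	cdb[9] = 0;

	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* 28 00 00 00 12 34 00 00 08 00 */
	return 0;
}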
3272
3273static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3274 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3275 u8 *scsi3addr)
3276{
3277 struct scsi_cmnd *cmd = c->scsi_cmd;
3278 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3279 unsigned int len;
3280 unsigned int total_len = 0;
3281 struct scatterlist *sg;
3282 u64 addr64;
3283 int use_sg, i;
3284 struct SGDescriptor *curr_sg;
3285 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3286
3287 /* TODO: implement chaining support */
3288 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3289 return IO_ACCEL_INELIGIBLE;
3290
3291 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3292
3293 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3294 return IO_ACCEL_INELIGIBLE;
3295
3296 c->cmd_type = CMD_IOACCEL1;
3297
3298 /* Adjust the DMA address to point to the accelerated command buffer */
3299 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3300 (c->cmdindex * sizeof(*cp));
3301 BUG_ON(c->busaddr & 0x0000007F);
3302
3303 use_sg = scsi_dma_map(cmd);
3304 if (use_sg < 0)
3305 return use_sg;
3306
3307 if (use_sg) {
3308 curr_sg = cp->SG;
3309 scsi_for_each_sg(cmd, sg, use_sg, i) {
3310 addr64 = (u64) sg_dma_address(sg);
3311 len = sg_dma_len(sg);
3312 total_len += len;
3313 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3314 curr_sg->Addr.upper =
3315 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3316 curr_sg->Len = len;
3317
3318 if (i == (scsi_sg_count(cmd) - 1))
3319 curr_sg->Ext = HPSA_SG_LAST;
3320 else
3321 curr_sg->Ext = 0; /* we are not chaining */
3322 curr_sg++;
3323 }
3324
3325 switch (cmd->sc_data_direction) {
3326 case DMA_TO_DEVICE:
3327 control |= IOACCEL1_CONTROL_DATA_OUT;
3328 break;
3329 case DMA_FROM_DEVICE:
3330 control |= IOACCEL1_CONTROL_DATA_IN;
3331 break;
3332 case DMA_NONE:
3333 control |= IOACCEL1_CONTROL_NODATAXFER;
3334 break;
3335 default:
3336 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3337 cmd->sc_data_direction);
3338 BUG();
3339 break;
3340 }
3341 } else {
3342 control |= IOACCEL1_CONTROL_NODATAXFER;
3343 }
3344
3345 c->Header.SGList = use_sg;
3346 /* Fill out the command structure to submit */
3347 cp->dev_handle = ioaccel_handle & 0xFFFF;
3348 cp->transfer_len = total_len;
3349 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
3350 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
3351 cp->control = control;
3352 memcpy(cp->CDB, cdb, cdb_len);
3353 memcpy(cp->CISS_LUN, scsi3addr, 8);
3354 /* Tag was already set at init time. */
3355 enqueue_cmd_and_start_io(h, c);
3356 return 0;
3357}
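
The BUG_ON() above asserts that every accelerated command address keeps its low 7 bits clear, i.e. is 128-byte aligned. That invariant holds whenever the pool base and the per-command stride are both multiples of 128; a quick standalone check with made-up numbers:

#include <stdio.h>
#include <stdint.h>

struct accel_cmd { unsigned char raw[128]; };	/* hypothetical stride */

int main(void)
{
	uint64_t pool_dhandle = 0x1f000000;	/* assumed 128-byte aligned */
	int idx;

	for (idx = 0; idx < 4; idx++) {
		uint64_t busaddr = pool_dhandle +
			idx * sizeof(struct accel_cmd);
		printf("cmd %d busaddr=0x%llx low7=0x%llx\n", idx,
		       (unsigned long long)busaddr,
		       (unsigned long long)(busaddr & 0x7f));
	}
	return 0;
}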
3358
3359/*
3360 * Queue a command directly to a device behind the controller using the
3361 * I/O accelerator path.
3362 */
3363static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3364 struct CommandList *c)
3365{
3366 struct scsi_cmnd *cmd = c->scsi_cmd;
3367 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3368
3369 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3370 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3371}
3372
3373/*
3374 * Set encryption parameters for the ioaccel2 request
3375 */
3376static void set_encrypt_ioaccel2(struct ctlr_info *h,
3377 struct CommandList *c, struct io_accel2_cmd *cp)
3378{
3379 struct scsi_cmnd *cmd = c->scsi_cmd;
3380 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3381 struct raid_map_data *map = &dev->raid_map;
3382 u64 first_block;
3383
3384 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3385
3386 /* Are we doing encryption on this device */
3387 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3388 return;
3389 /* Set the data encryption key index. */
3390 cp->dekindex = map->dekindex;
3391
3392 /* Set the encryption enable flag, encoded into direction field. */
3393 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3394
3395 /* Set encryption tweak values based on logical block address
3396 * If block size is 512, tweak value is LBA.
3397 * For other block sizes, tweak is (LBA * block size)/ 512)
3398 */
3399 switch (cmd->cmnd[0]) {
3400 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3401 case WRITE_6:
3402 case READ_6:
3403 if (map->volume_blk_size == 512) {
3404 cp->tweak_lower =
3405 (((u32) cmd->cmnd[2]) << 8) |
3406 cmd->cmnd[3];
3407 cp->tweak_upper = 0;
3408 } else {
3409 first_block =
3410 (((u64) cmd->cmnd[2]) << 8) |
3411 cmd->cmnd[3];
3412 first_block = (first_block * map->volume_blk_size)/512;
3413 cp->tweak_lower = (u32)first_block;
3414 cp->tweak_upper = (u32)(first_block >> 32);
3415 }
3416 break;
3417 case WRITE_10:
3418 case READ_10:
3419 if (map->volume_blk_size == 512) {
3420 cp->tweak_lower =
3421 (((u32) cmd->cmnd[2]) << 24) |
3422 (((u32) cmd->cmnd[3]) << 16) |
3423 (((u32) cmd->cmnd[4]) << 8) |
3424 cmd->cmnd[5];
3425 cp->tweak_upper = 0;
3426 } else {
3427 first_block =
3428 (((u64) cmd->cmnd[2]) << 24) |
3429 (((u64) cmd->cmnd[3]) << 16) |
3430 (((u64) cmd->cmnd[4]) << 8) |
3431 cmd->cmnd[5];
3432 first_block = (first_block * map->volume_blk_size)/512;
3433 cp->tweak_lower = (u32)first_block;
3434 cp->tweak_upper = (u32)(first_block >> 32);
3435 }
3436 break;
3437 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3438 case WRITE_12:
3439 case READ_12:
3440 if (map->volume_blk_size == 512) {
3441 cp->tweak_lower =
3442 (((u32) cmd->cmnd[2]) << 24) |
3443 (((u32) cmd->cmnd[3]) << 16) |
3444 (((u32) cmd->cmnd[4]) << 8) |
3445 cmd->cmnd[5];
3446 cp->tweak_upper = 0;
3447 } else {
3448 first_block =
3449 (((u64) cmd->cmnd[2]) << 24) |
3450 (((u64) cmd->cmnd[3]) << 16) |
3451 (((u64) cmd->cmnd[4]) << 8) |
3452 cmd->cmnd[5];
3453 first_block = (first_block * map->volume_blk_size)/512;
3454 cp->tweak_lower = (u32)first_block;
3455 cp->tweak_upper = (u32)(first_block >> 32);
3456 }
3457 break;
3458 case WRITE_16:
3459 case READ_16:
3460 if (map->volume_blk_size == 512) {
3461 cp->tweak_lower =
3462 (((u32) cmd->cmnd[6]) << 24) |
3463 (((u32) cmd->cmnd[7]) << 16) |
3464 (((u32) cmd->cmnd[8]) << 8) |
3465 cmd->cmnd[9];
3466 cp->tweak_upper =
3467 (((u32) cmd->cmnd[2]) << 24) |
3468 (((u32) cmd->cmnd[3]) << 16) |
3469 (((u32) cmd->cmnd[4]) << 8) |
3470 cmd->cmnd[5];
3471 } else {
3472 first_block =
3473 (((u64) cmd->cmnd[2]) << 56) |
3474 (((u64) cmd->cmnd[3]) << 48) |
3475 (((u64) cmd->cmnd[4]) << 40) |
3476 (((u64) cmd->cmnd[5]) << 32) |
3477 (((u64) cmd->cmnd[6]) << 24) |
3478 (((u64) cmd->cmnd[7]) << 16) |
3479 (((u64) cmd->cmnd[8]) << 8) |
3480 cmd->cmnd[9];
3481 first_block = (first_block * map->volume_blk_size)/512;
3482 cp->tweak_lower = (u32)first_block;
3483 cp->tweak_upper = (u32)(first_block >> 32);
3484 }
3485 break;
3486 default:
3487 dev_err(&h->pdev->dev,
3488 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3489 __func__);
3490 BUG();
3491 break;
3492 }
3493}
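
A worked instance of the tweak rule stated in the comment above: for a non-512-byte volume, tweak = (LBA * block size) / 512. The numbers are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t lba = 100, blk_size = 4096;		/* 4K-block volume */
	uint64_t tweak = (lba * blk_size) / 512;	/* 800 */
	uint32_t tweak_lower = (uint32_t)tweak;
	uint32_t tweak_upper = (uint32_t)(tweak >> 32);

	printf("tweak=%llu lower=%u upper=%u\n",
	       (unsigned long long)tweak, tweak_lower, tweak_upper);
	return 0;
}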
3494
3495static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3496 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3497 u8 *scsi3addr)
3498{
3499 struct scsi_cmnd *cmd = c->scsi_cmd;
3500 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3501 struct ioaccel2_sg_element *curr_sg;
3502 int use_sg, i;
3503 struct scatterlist *sg;
3504 u64 addr64;
3505 u32 len;
3506 u32 total_len = 0;
3507
3508 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3509 return IO_ACCEL_INELIGIBLE;
3510
3511 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3512 return IO_ACCEL_INELIGIBLE;
3513 c->cmd_type = CMD_IOACCEL2;
3514 /* Adjust the DMA address to point to the accelerated command buffer */
3515 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3516 (c->cmdindex * sizeof(*cp));
3517 BUG_ON(c->busaddr & 0x0000007F);
3518
3519 memset(cp, 0, sizeof(*cp));
3520 cp->IU_type = IOACCEL2_IU_TYPE;
3521
3522 use_sg = scsi_dma_map(cmd);
3523 if (use_sg < 0)
3524 return use_sg;
3525
3526 if (use_sg) {
3527 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3528 curr_sg = cp->sg;
3529 scsi_for_each_sg(cmd, sg, use_sg, i) {
3530 addr64 = (u64) sg_dma_address(sg);
3531 len = sg_dma_len(sg);
3532 total_len += len;
3533 curr_sg->address = cpu_to_le64(addr64);
3534 curr_sg->length = cpu_to_le32(len);
3535 curr_sg->reserved[0] = 0;
3536 curr_sg->reserved[1] = 0;
3537 curr_sg->reserved[2] = 0;
3538 curr_sg->chain_indicator = 0;
3539 curr_sg++;
3540 }
3541
3542 switch (cmd->sc_data_direction) {
3543 case DMA_TO_DEVICE:
3544 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3545 cp->direction |= IOACCEL2_DIR_DATA_OUT;
3546 break;
3547 case DMA_FROM_DEVICE:
3548 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3549 cp->direction |= IOACCEL2_DIR_DATA_IN;
3550 break;
3551 case DMA_NONE:
3552 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3553 cp->direction |= IOACCEL2_DIR_NO_DATA;
3554 break;
3555 default:
3556 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3557 cmd->sc_data_direction);
3558 BUG();
3559 break;
3560 }
3561 } else {
3562 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3563 cp->direction |= IOACCEL2_DIR_NO_DATA;
3564 }
3565
3566 /* Set encryption parameters, if necessary */
3567 set_encrypt_ioaccel2(h, c, cp);
3568
3569 cp->scsi_nexus = ioaccel_handle;
3570 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
3571 DIRECT_LOOKUP_BIT;
3572 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3573
3574 /* fill in sg elements */
3575 cp->sg_count = (u8) use_sg;
3576
3577 cp->data_len = cpu_to_le32(total_len);
3578 cp->err_ptr = cpu_to_le64(c->busaddr +
3579 offsetof(struct io_accel2_cmd, error_data));
3580 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3581
3582 enqueue_cmd_and_start_io(h, c);
3583 return 0;
3584}
3585
3586/*
3587 * Queue a command to the correct I/O accelerator path.
3588 */
3589static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3590 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3591 u8 *scsi3addr)
3592{
3593 if (h->transMethod & CFGTBL_Trans_io_accel1)
3594 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3595 cdb, cdb_len, scsi3addr);
3596 else
3597 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3598 cdb, cdb_len, scsi3addr);
3599}
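
The RAID-map code below rests on simple strip geometry: a row holds data_disks_per_row strips of strip_size blocks each, and a request is eligible for direct mapping only if it stays within one row and one column. A standalone run of that arithmetic with assumed parameters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t first_block = 1000, last_block = 1007;	/* 8-block read */
	uint32_t strip_size = 128, data_disks_per_row = 4;	/* assumed */
	uint32_t blocks_per_row = data_disks_per_row * strip_size; /* 512 */

	uint64_t first_row = first_block / blocks_per_row;	/* 1 */
	uint64_t last_row = last_block / blocks_per_row;	/* 1 */
	uint32_t first_off = (uint32_t)(first_block - first_row * blocks_per_row);
	uint32_t last_off = (uint32_t)(last_block - last_row * blocks_per_row);
	uint32_t first_column = first_off / strip_size;		/* 3 */
	uint32_t last_column = last_off / strip_size;		/* 3 */

	printf("row %llu column %u eligible=%d\n",
	       (unsigned long long)first_row, first_column,
	       first_row == last_row && first_column == last_column);
	return 0;
}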
3600
3601static void raid_map_helper(struct raid_map_data *map,
3602 int offload_to_mirror, u32 *map_index, u32 *current_group)
3603{
3604 if (offload_to_mirror == 0) {
3605 /* use physical disk in the first mirrored group. */
3606 *map_index %= map->data_disks_per_row;
3607 return;
3608 }
3609 do {
3610 /* determine mirror group that *map_index indicates */
3611 *current_group = *map_index / map->data_disks_per_row;
3612 if (offload_to_mirror == *current_group)
3613 continue;
3614 if (*current_group < (map->layout_map_count - 1)) {
3615 /* select map index from next group */
3616 *map_index += map->data_disks_per_row;
3617 (*current_group)++;
3618 } else {
3619 /* select map index from first group */
3620 *map_index %= map->data_disks_per_row;
3621 *current_group = 0;
3622 }
3623 } while (offload_to_mirror != *current_group);
3624}
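
A userspace copy of raid_map_helper(), run across a hypothetical 3-way mirror (layout_map_count = 3, data_disks_per_row = 2), shows how the same starting index lands in a different mirror group for each offload_to_mirror value:

#include <stdio.h>
#include <stdint.h>

struct map { uint16_t data_disks_per_row, layout_map_count; };

static void helper(const struct map *m, int offload_to_mirror,
		   uint32_t *map_index, uint32_t *current_group)
{
	if (offload_to_mirror == 0) {
		*map_index %= m->data_disks_per_row;
		return;
	}
	do {
		*current_group = *map_index / m->data_disks_per_row;
		if ((uint32_t)offload_to_mirror == *current_group)
			continue;	/* jumps to the while() test */
		if (*current_group < (uint32_t)(m->layout_map_count - 1)) {
			*map_index += m->data_disks_per_row;
			(*current_group)++;
		} else {
			*map_index %= m->data_disks_per_row;
			*current_group = 0;
		}
	} while ((uint32_t)offload_to_mirror != *current_group);
}

int main(void)
{
	struct map m = { 2, 3 };
	int g;

	for (g = 0; g < 3; g++) {
		uint32_t idx = 1, grp = 0;	/* second disk, group 0 */
		helper(&m, g, &idx, &grp);
		printf("mirror group %d -> map_index %u\n", g, idx);
	}
	return 0;	/* prints map_index 1, 3, 5 */
}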
3625
3626/*
3627 * Attempt to perform offload RAID mapping for a logical volume I/O.
3628 */
3629static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3630 struct CommandList *c)
3631{
3632 struct scsi_cmnd *cmd = c->scsi_cmd;
3633 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3634 struct raid_map_data *map = &dev->raid_map;
3635 struct raid_map_disk_data *dd = &map->data[0];
3636 int is_write = 0;
3637 u32 map_index;
3638 u64 first_block, last_block;
3639 u32 block_cnt;
3640 u32 blocks_per_row;
3641 u64 first_row, last_row;
3642 u32 first_row_offset, last_row_offset;
3643 u32 first_column, last_column;
3644 u64 r0_first_row, r0_last_row;
3645 u32 r5or6_blocks_per_row;
3646 u64 r5or6_first_row, r5or6_last_row;
3647 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3648 u32 r5or6_first_column, r5or6_last_column;
3649 u32 total_disks_per_row;
3650 u32 stripesize;
3651 u32 first_group, last_group, current_group;
3652 u32 map_row;
3653 u32 disk_handle;
3654 u64 disk_block;
3655 u32 disk_block_cnt;
3656 u8 cdb[16];
3657 u8 cdb_len;
3658#if BITS_PER_LONG == 32
3659 u64 tmpdiv;
3660#endif
3661 int offload_to_mirror;
3662
3663 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3664
3665 /* check for valid opcode, get LBA and block count */
3666 switch (cmd->cmnd[0]) {
3667 case WRITE_6:
3668 is_write = 1;
3669 case READ_6:
3670 first_block =
3671 (((u64) cmd->cmnd[2]) << 8) |
3672 cmd->cmnd[3];
3673 block_cnt = cmd->cmnd[4];
3674 break;
3675 case WRITE_10:
3676 is_write = 1;
3677 case READ_10:
3678 first_block =
3679 (((u64) cmd->cmnd[2]) << 24) |
3680 (((u64) cmd->cmnd[3]) << 16) |
3681 (((u64) cmd->cmnd[4]) << 8) |
3682 cmd->cmnd[5];
3683 block_cnt =
3684 (((u32) cmd->cmnd[7]) << 8) |
3685 cmd->cmnd[8];
3686 break;
3687 case WRITE_12:
3688 is_write = 1;
3689 case READ_12:
3690 first_block =
3691 (((u64) cmd->cmnd[2]) << 24) |
3692 (((u64) cmd->cmnd[3]) << 16) |
3693 (((u64) cmd->cmnd[4]) << 8) |
3694 cmd->cmnd[5];
3695 block_cnt =
3696 (((u32) cmd->cmnd[6]) << 24) |
3697 (((u32) cmd->cmnd[7]) << 16) |
3698 (((u32) cmd->cmnd[8]) << 8) |
3699 cmd->cmnd[9];
3700 break;
3701 case WRITE_16:
3702 is_write = 1;
3703 case READ_16:
3704 first_block =
3705 (((u64) cmd->cmnd[2]) << 56) |
3706 (((u64) cmd->cmnd[3]) << 48) |
3707 (((u64) cmd->cmnd[4]) << 40) |
3708 (((u64) cmd->cmnd[5]) << 32) |
3709 (((u64) cmd->cmnd[6]) << 24) |
3710 (((u64) cmd->cmnd[7]) << 16) |
3711 (((u64) cmd->cmnd[8]) << 8) |
3712 cmd->cmnd[9];
3713 block_cnt =
3714 (((u32) cmd->cmnd[10]) << 24) |
3715 (((u32) cmd->cmnd[11]) << 16) |
3716 (((u32) cmd->cmnd[12]) << 8) |
3717 cmd->cmnd[13];
3718 break;
3719 default:
3720 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3721 }
3722 BUG_ON(block_cnt == 0);
3723 last_block = first_block + block_cnt - 1;
3724
3725 /* check for write to non-RAID-0 */
3726 if (is_write && dev->raid_level != 0)
3727 return IO_ACCEL_INELIGIBLE;
3728
3729 /* check for invalid block or wraparound */
3730 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3731 return IO_ACCEL_INELIGIBLE;
3732
3733 /* calculate stripe information for the request */
3734 blocks_per_row = map->data_disks_per_row * map->strip_size;
3735#if BITS_PER_LONG == 32
3736 tmpdiv = first_block;
3737 (void) do_div(tmpdiv, blocks_per_row);
3738 first_row = tmpdiv;
3739 tmpdiv = last_block;
3740 (void) do_div(tmpdiv, blocks_per_row);
3741 last_row = tmpdiv;
3742 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3743 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3744 tmpdiv = first_row_offset;
3745 (void) do_div(tmpdiv, map->strip_size);
3746 first_column = tmpdiv;
3747 tmpdiv = last_row_offset;
3748 (void) do_div(tmpdiv, map->strip_size);
3749 last_column = tmpdiv;
3750#else
3751 first_row = first_block / blocks_per_row;
3752 last_row = last_block / blocks_per_row;
3753 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3754 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3755 first_column = first_row_offset / map->strip_size;
3756 last_column = last_row_offset / map->strip_size;
3757#endif
3758
3759 /* if this isn't a single row/column then give to the controller */
3760 if ((first_row != last_row) || (first_column != last_column))
3761 return IO_ACCEL_INELIGIBLE;
3762
3763 /* proceeding with driver mapping */
3764 total_disks_per_row = map->data_disks_per_row +
3765 map->metadata_disks_per_row;
3766 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3767 map->row_cnt;
3768 map_index = (map_row * total_disks_per_row) + first_column;
3769
3770 switch (dev->raid_level) {
3771 case HPSA_RAID_0:
3772 break; /* nothing special to do */
3773 case HPSA_RAID_1:
3774 /* Handles load balancing across RAID 1 members
3775 * (2-drive RAID 1 and RAID 10 with an even # of drives).
3776 * Appropriate for SSDs, not optimal for HDDs.
3777 */
3778 BUG_ON(map->layout_map_count != 2);
3779 if (dev->offload_to_mirror)
3780 map_index += map->data_disks_per_row;
3781 dev->offload_to_mirror = !dev->offload_to_mirror;
3782 break;
3783 case HPSA_RAID_ADM:
3784 /* Handles N-way mirrors (R1-ADM)
3785 * and R10 with # of drives divisible by 3.
3786 */
3787 BUG_ON(map->layout_map_count != 3);
3788
3789 offload_to_mirror = dev->offload_to_mirror;
3790 raid_map_helper(map, offload_to_mirror,
3791 &map_index, &current_group);
3792 /* set mirror group to use next time */
3793 offload_to_mirror =
3794 (offload_to_mirror >= map->layout_map_count - 1)
3795 ? 0 : offload_to_mirror + 1;
3796 /* FIXME: remove after debug/dev */
3797 BUG_ON(offload_to_mirror >= map->layout_map_count);
3798 dev_warn(&h->pdev->dev,
3799 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3800 map_index, offload_to_mirror);
3801 dev->offload_to_mirror = offload_to_mirror;
3802 /* Avoid direct use of dev->offload_to_mirror within this
3803 * function since multiple threads might simultaneously
3804 * increment it beyond the range of map->layout_map_count - 1.
3805 */
3806 break;
3807 case HPSA_RAID_5:
3808 case HPSA_RAID_6:
3809 if (map->layout_map_count <= 1)
3810 break;
3811
3812 /* Verify first and last block are in same RAID group */
3813 r5or6_blocks_per_row =
3814 map->strip_size * map->data_disks_per_row;
3815 BUG_ON(r5or6_blocks_per_row == 0);
3816 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3817#if BITS_PER_LONG == 32
3818 tmpdiv = first_block;
3819 first_group = do_div(tmpdiv, stripesize);
3820 tmpdiv = first_group;
3821 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3822 first_group = tmpdiv;
3823 tmpdiv = last_block;
3824 last_group = do_div(tmpdiv, stripesize);
3825 tmpdiv = last_group;
3826 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3827 last_group = tmpdiv;
3828#else
3829 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3830 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3831#endif
3832 if (first_group != last_group)
3833 return IO_ACCEL_INELIGIBLE;
3834
3835 /* Verify request is in a single row of RAID 5/6 */
3836#if BITS_PER_LONG == 32
3837 tmpdiv = first_block;
3838 (void) do_div(tmpdiv, stripesize);
3839 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3840 tmpdiv = last_block;
3841 (void) do_div(tmpdiv, stripesize);
3842 r5or6_last_row = r0_last_row = tmpdiv;
3843#else
3844 first_row = r5or6_first_row = r0_first_row =
3845 first_block / stripesize;
3846 r5or6_last_row = r0_last_row = last_block / stripesize;
3847#endif
3848 if (r5or6_first_row != r5or6_last_row)
3849 return IO_ACCEL_INELIGIBLE;
3850
3851
3852 /* Verify request is in a single column */
3853#if BITS_PER_LONG == 32
3854 tmpdiv = first_block;
3855 first_row_offset = do_div(tmpdiv, stripesize);
3856 tmpdiv = first_row_offset;
3857 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3858 r5or6_first_row_offset = first_row_offset;
3859 tmpdiv = last_block;
3860 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3861 tmpdiv = r5or6_last_row_offset;
3862 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3863 tmpdiv = r5or6_first_row_offset;
3864 (void) do_div(tmpdiv, map->strip_size);
3865 first_column = r5or6_first_column = tmpdiv;
3866 tmpdiv = r5or6_last_row_offset;
3867 (void) do_div(tmpdiv, map->strip_size);
3868 r5or6_last_column = tmpdiv;
3869#else
3870 first_row_offset = r5or6_first_row_offset =
3871 (u32)((first_block % stripesize) %
3872 r5or6_blocks_per_row);
3873
3874 r5or6_last_row_offset =
3875 (u32)((last_block % stripesize) %
3876 r5or6_blocks_per_row);
3877
3878 first_column = r5or6_first_column =
3879 r5or6_first_row_offset / map->strip_size;
3880 r5or6_last_column =
3881 r5or6_last_row_offset / map->strip_size;
3882#endif
3883 if (r5or6_first_column != r5or6_last_column)
3884 return IO_ACCEL_INELIGIBLE;
3885
3886 /* Request is eligible */
3887 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3888 map->row_cnt;
3889
3890 map_index = (first_group *
3891 (map->row_cnt * total_disks_per_row)) +
3892 (map_row * total_disks_per_row) + first_column;
3893 break;
3894 default:
3895 return IO_ACCEL_INELIGIBLE;
3896 }
3897
3898 disk_handle = dd[map_index].ioaccel_handle;
3899 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3900 (first_row_offset - (first_column * map->strip_size));
3901 disk_block_cnt = block_cnt;
3902
3903 /* handle differing logical/physical block sizes */
3904 if (map->phys_blk_shift) {
3905 disk_block <<= map->phys_blk_shift;
3906 disk_block_cnt <<= map->phys_blk_shift;
3907 }
3908 BUG_ON(disk_block_cnt > 0xffff);
3909
3910 /* build the new CDB for the physical disk I/O */
3911 if (disk_block > 0xffffffff) {
3912 cdb[0] = is_write ? WRITE_16 : READ_16;
3913 cdb[1] = 0;
3914 cdb[2] = (u8) (disk_block >> 56);
3915 cdb[3] = (u8) (disk_block >> 48);
3916 cdb[4] = (u8) (disk_block >> 40);
3917 cdb[5] = (u8) (disk_block >> 32);
3918 cdb[6] = (u8) (disk_block >> 24);
3919 cdb[7] = (u8) (disk_block >> 16);
3920 cdb[8] = (u8) (disk_block >> 8);
3921 cdb[9] = (u8) (disk_block);
3922 cdb[10] = (u8) (disk_block_cnt >> 24);
3923 cdb[11] = (u8) (disk_block_cnt >> 16);
3924 cdb[12] = (u8) (disk_block_cnt >> 8);
3925 cdb[13] = (u8) (disk_block_cnt);
3926 cdb[14] = 0;
3927 cdb[15] = 0;
3928 cdb_len = 16;
3929 } else {
3930 cdb[0] = is_write ? WRITE_10 : READ_10;
3931 cdb[1] = 0;
3932 cdb[2] = (u8) (disk_block >> 24);
3933 cdb[3] = (u8) (disk_block >> 16);
3934 cdb[4] = (u8) (disk_block >> 8);
3935 cdb[5] = (u8) (disk_block);
3936 cdb[6] = 0;
3937 cdb[7] = (u8) (disk_block_cnt >> 8);
3938 cdb[8] = (u8) (disk_block_cnt);
3939 cdb[9] = 0;
3940 cdb_len = 10;
3941 }
3942 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3943 dev->scsi3addr);
3944}
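hpsa_scsi_ioaccel_raid_map() finishes by encoding the physical-disk LBA and block count into a big-endian CDB, using the 10-byte form unless the LBA overflows 32 bits. A self-contained sketch of that selection; the opcodes are the standard SCSI values (0x28 READ_10, 0x88 READ_16) written inline rather than taken from scsi.h:

#include <stdint.h>
#include <string.h>

/* Build a READ(10) or READ(16) CDB for a block range, picking the 16-byte
 * form only when the LBA does not fit in 32 bits.  Returns the CDB length.
 * The caller must keep nblocks <= 0xffff for the 10-byte form, as the
 * driver's BUG_ON(disk_block_cnt > 0xffff) enforces. */
static int build_read_cdb(uint64_t lba, uint32_t nblocks, uint8_t cdb[16])
{
	memset(cdb, 0, 16);
	if (lba > 0xffffffffULL) {
		cdb[0] = 0x88;				/* READ_16 */
		for (int i = 0; i < 8; i++)		/* bytes 2..9: 64-bit LBA */
			cdb[2 + i] = (uint8_t)(lba >> (56 - 8 * i));
		for (int i = 0; i < 4; i++)		/* bytes 10..13: count */
			cdb[10 + i] = (uint8_t)(nblocks >> (24 - 8 * i));
		return 16;
	}
	cdb[0] = 0x28;					/* READ_10 */
	for (int i = 0; i < 4; i++)			/* bytes 2..5: 32-bit LBA */
		cdb[2 + i] = (uint8_t)(lba >> (24 - 8 * i));
	cdb[7] = (uint8_t)(nblocks >> 8);		/* bytes 7..8: 16-bit count */
	cdb[8] = (uint8_t)nblocks;
	return 10;
}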
2163 3945
2164 3946 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2165 3947 void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2169 3951 unsigned char scsi3addr[8];
2170 3952 struct CommandList *c;
2171 3953 unsigned long flags;
3954 int rc = 0;
2172 3955
2173 3956 /* Get the ptr to our adapter structure out of cmd->host. */
2174 3957 h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2203 3986
2204 3987 c->cmd_type = CMD_SCSI;
2205 3988 c->scsi_cmd = cmd;
3989
3990 /* Call alternate submit routine for I/O accelerated commands.
3991 * Retries always go down the normal I/O path.
3992 */
3993 if (likely(cmd->retries == 0 &&
3994 cmd->request->cmd_type == REQ_TYPE_FS &&
3995 h->acciopath_status)) {
3996 if (dev->offload_enabled) {
3997 rc = hpsa_scsi_ioaccel_raid_map(h, c);
3998 if (rc == 0)
3999 return 0; /* Sent on ioaccel path */
4000 if (rc < 0) { /* scsi_dma_map failed. */
4001 cmd_free(h, c);
4002 return SCSI_MLQUEUE_HOST_BUSY;
4003 }
4004 } else if (dev->ioaccel_handle) {
4005 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4006 if (rc == 0)
4007 return 0; /* Sent on direct map path */
4008 if (rc < 0) { /* scsi_dma_map failed. */
4009 cmd_free(h, c);
4010 return SCSI_MLQUEUE_HOST_BUSY;
4011 }
4012 }
4013 }
4014
2206 4015 c->Header.ReplyQueue = 0; /* unused in simple mode */
2207 4016 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2208 4017 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2262 4071
2263 4072 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2264 4073
4074static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4075{
4076 unsigned long flags;
4077
4078 /*
4079 * Don't let rescans be initiated on a controller known
4080 * to be locked up. If the controller locks up *during*
4081 * a rescan, that thread is probably hosed, but at least
4082 * we can prevent new rescan threads from piling up on a
4083 * locked up controller.
4084 */
4085 spin_lock_irqsave(&h->lock, flags);
4086 if (unlikely(h->lockup_detected)) {
4087 spin_unlock_irqrestore(&h->lock, flags);
4088 spin_lock_irqsave(&h->scan_lock, flags);
4089 h->scan_finished = 1;
4090 wake_up_all(&h->scan_wait_queue);
4091 spin_unlock_irqrestore(&h->scan_lock, flags);
4092 return 1;
4093 }
4094 spin_unlock_irqrestore(&h->lock, flags);
4095 return 0;
4096}
4097
2265 4098 static void hpsa_scan_start(struct Scsi_Host *sh)
2266 4099 {
2267 4100 struct ctlr_info *h = shost_to_hba(sh);
2268 4101 unsigned long flags;
2269 4102
4103 if (do_not_scan_if_controller_locked_up(h))
4104 return;
4105
2270 4106 /* wait until any scan already in progress is finished. */
2271 4107 while (1) {
2272 4108 spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
2283 4119 h->scan_finished = 0; /* mark scan as in progress */
2284 4120 spin_unlock_irqrestore(&h->scan_lock, flags);
2285 4121
4122 if (do_not_scan_if_controller_locked_up(h))
4123 return;
4124
2286 4125 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2287 4126
2288 4127 spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)
2346 4185 sh->max_lun = HPSA_MAX_LUN;
2347 4186 sh->max_id = HPSA_MAX_LUN;
2348 4187 sh->can_queue = h->nr_cmds;
2349 sh->cmd_per_lun = h->nr_cmds;
4188 if (h->hba_mode_enabled)
4189 sh->cmd_per_lun = 7;
4190 else
4191 sh->cmd_per_lun = h->nr_cmds;
2350 4192 sh->sg_tablesize = h->maxsgentries;
2351 4193 h->scsi_host = sh;
2352 4194 sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)
2372 4214 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2373 4215 unsigned char lunaddr[])
2374 4216 {
2375 int rc = 0;
4217 int rc;
2376 4218 int count = 0;
2377 4219 int waittime = 1; /* seconds */
2378 4220 struct CommandList *c;
@@ -2392,6 +4234,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
2392 4234 */
2393 4235 msleep(1000 * waittime);
2394 4236 count++;
4237 rc = 0; /* Device ready. */
2395 4238
2396 4239 /* Increase wait time with each try, up to a point. */
2397 4240 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2448 4291 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2449 4292 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2450 4293 /* send a reset to the SCSI LUN which the command was sent to */
2451 rc = hpsa_send_reset(h, dev->scsi3addr);
4294 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
2452 4295 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2453 4296 return SUCCESS;
2454 4297
@@ -2471,12 +4314,36 @@ static void swizzle_abort_tag(u8 *tag)
2471 4314 tag[7] = original_tag[4];
2472 4315 }
2473 4316
4317static void hpsa_get_tag(struct ctlr_info *h,
4318 struct CommandList *c, u32 *taglower, u32 *tagupper)
4319{
4320 if (c->cmd_type == CMD_IOACCEL1) {
4321 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4322 &h->ioaccel_cmd_pool[c->cmdindex];
4323 *tagupper = cm1->Tag.upper;
4324 *taglower = cm1->Tag.lower;
4325 return;
4326 }
4327 if (c->cmd_type == CMD_IOACCEL2) {
4328 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4329 &h->ioaccel2_cmd_pool[c->cmdindex];
4330 /* upper tag not used in ioaccel2 mode */
4331 memset(tagupper, 0, sizeof(*tagupper));
4332 *taglower = cm2->Tag;
4333 return;
4334 }
4335 *tagupper = c->Header.Tag.upper;
4336 *taglower = c->Header.Tag.lower;
4337}
4338
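hpsa_get_tag() lets the abort and debug paths print one tag format no matter which command format carried it. The underlying operation is a plain 64-bit split; a minimal user-space sketch (names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit tag into the upper/lower halves that the driver's
 * "Tag:0x%08x:%08x" messages print. */
static void split_tag(uint64_t tag, uint32_t *upper, uint32_t *lower)
{
	*upper = (uint32_t)(tag >> 32);
	*lower = (uint32_t)tag;
}

int main(void)
{
	uint32_t up, lo;

	split_tag(0x1122334455667788ULL, &up, &lo);
	printf("Tag:0x%08x:%08x\n", up, lo);	/* Tag:0x11223344:55667788 */
	return 0;
}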
4339
2474 4340 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2475 4341 struct CommandList *abort, int swizzle)
2476 4342 {
2477 4343 int rc = IO_OK;
2478 4344 struct CommandList *c;
2479 4345 struct ErrorInfo *ei;
4346 u32 tagupper, taglower;
2480 4347
2481 4348 c = cmd_special_alloc(h);
2482 4349 if (c == NULL) { /* trouble... */
@@ -2490,8 +4357,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2490 4357 if (swizzle)
2491 4358 swizzle_abort_tag(&c->Request.CDB[4]);
2492 4359 hpsa_scsi_do_simple_cmd_core(h, c);
4360 hpsa_get_tag(h, abort, &taglower, &tagupper);
2493 4361 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
2494 __func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
4362 __func__, tagupper, taglower);
2495 4363 /* no unmap needed here because no data xfer. */
2496 4364
2497 4365 ei = c->err_info;
@@ -2503,15 +4371,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
2503 4371 break;
2504 4372 default:
2505 4373 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
2506 __func__, abort->Header.Tag.upper,
2507 abort->Header.Tag.lower);
2508 hpsa_scsi_interpret_error(c);
4374 __func__, tagupper, taglower);
4375 hpsa_scsi_interpret_error(h, c);
2509 4376 rc = -1;
2510 4377 break;
2511 4378 }
2512 4379 cmd_special_free(h, c);
2513 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
2514 abort->Header.Tag.upper, abort->Header.Tag.lower);
4380 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4381 __func__, tagupper, taglower);
2515 4382 return rc;
2516 4383 }
2517 4384
@@ -2565,6 +4432,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
2565 4432 return NULL;
2566 4433 }
2567 4434
4435/* The ioaccel2 path firmware cannot handle abort task requests.
4436 * Change abort requests to physical target resets, and send them to the
4437 * address of the physical disk used for the ioaccel2 command.
4438 * Returns 0 (IO_OK) on success,
4439 * -1 on failure.
4440 */
4441
4442static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4443 unsigned char *scsi3addr, struct CommandList *abort)
4444{
4445 int rc = IO_OK;
4446 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4447 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4448 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4449 unsigned char *psa = &phys_scsi3addr[0];
4450
4451 /* Get a pointer to the hpsa logical device. */
4452 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4453 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4454 if (dev == NULL) {
4455 dev_warn(&h->pdev->dev,
4456 "Cannot abort: no device pointer for command.\n");
4457 return -1; /* not abortable */
4458 }
4459
4460 if (h->raid_offload_debug > 0)
4461 dev_info(&h->pdev->dev,
4462 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4463 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4464 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4465 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4466
4467 if (!dev->offload_enabled) {
4468 dev_warn(&h->pdev->dev,
4469 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4470 return -1; /* not abortable */
4471 }
4472
4473 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4474 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4475 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4476 return -1; /* not abortable */
4477 }
4478
4479 /* send the reset */
4480 if (h->raid_offload_debug > 0)
4481 dev_info(&h->pdev->dev,
4482 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4483 psa[0], psa[1], psa[2], psa[3],
4484 psa[4], psa[5], psa[6], psa[7]);
4485 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4486 if (rc != 0) {
4487 dev_warn(&h->pdev->dev,
4488 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4489 psa[0], psa[1], psa[2], psa[3],
4490 psa[4], psa[5], psa[6], psa[7]);
4491 return rc; /* failed to reset */
4492 }
4493
4494 /* wait for device to recover */
4495 if (wait_for_device_to_become_ready(h, psa) != 0) {
4496 dev_warn(&h->pdev->dev,
4497 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4498 psa[0], psa[1], psa[2], psa[3],
4499 psa[4], psa[5], psa[6], psa[7]);
4500 return -1; /* failed to recover */
4501 }
4502
4503 /* device recovered */
4504 dev_info(&h->pdev->dev,
4505 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4506 psa[0], psa[1], psa[2], psa[3],
4507 psa[4], psa[5], psa[6], psa[7]);
4508
4509 return rc; /* success */
4510}
4511
2568 4512 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
2569 4513 * tell which kind we're dealing with, so we send the abort both ways. There
2570 4514 * shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
2578 4522 struct CommandList *c;
2579 4523 int rc = 0, rc2 = 0;
2580 4524
4525 /* I/O accelerator mode 2 commands should be aborted via the
4526 * accelerated path, since the RAID path is unaware of these commands,
4527 * but the underlying firmware can't handle the abort TMF.
4528 * Change the abort to a physical device reset.
4529 */
4530 if (abort->cmd_type == CMD_IOACCEL2)
4531 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4532
2581 4533 /* we do not expect to find the swizzled tag in our queue, but
2582 4534 * check anyway just to be sure the assumptions which make this
2583 4535 * the case haven't become wrong.
@@ -2616,6 +4568,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2616 4568 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
2617 4569 char msg[256]; /* For debug messaging. */
2618 4570 int ml = 0;
4571 u32 tagupper, taglower;
2619 4572
2620 4573 /* Find the controller of the command to be aborted */
2621 4574 h = sdev_to_hba(sc->device);
@@ -2648,9 +4601,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2648 4601 msg);
2649 4602 return FAILED;
2650 4603 }
2651
2652 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
2653 abort->Header.Tag.upper, abort->Header.Tag.lower);
4604 hpsa_get_tag(h, abort, &taglower, &tagupper);
4605 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
2654 4606 as = (struct scsi_cmnd *) abort->scsi_cmd;
2655 4607 if (as != NULL)
2656 4608 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
@@ -2776,6 +4728,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2776 4728 return NULL;
2777 4729 memset(c, 0, sizeof(*c));
2778 4730
4731 c->cmd_type = CMD_SCSI;
2779 4732 c->cmdindex = -1;
2780 4733
2781 4734 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
@@ -3038,7 +4991,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
3038 4991 c->SG[0].Addr.lower = temp64.val32.lower;
3039 4992 c->SG[0].Addr.upper = temp64.val32.upper;
3040 4993 c->SG[0].Len = iocommand.buf_size;
3041 c->SG[0].Ext = 0; /* we are not chaining */
4994 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
3042 4995 }
3043 4996 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3044 4997 if (iocommand.buf_size > 0)
@@ -3168,8 +5121,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
3168 5121 c->SG[i].Addr.lower = temp64.val32.lower;
3169 5122 c->SG[i].Addr.upper = temp64.val32.upper;
3170 5123 c->SG[i].Len = buff_size[i];
3171 /* we are not chaining */
3172 c->SG[i].Ext = 0;
5124 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
3173 5125 }
3174 5126 }
3175 5127 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
@@ -3304,7 +5256,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
3304 5256 }
3305 5257
3306 5258 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3307 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
5259 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
3308 5260 int cmd_type)
3309 5261 {
3310 5262 int pci_dir = XFER_NONE;
@@ -3327,9 +5279,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3327 5279 switch (cmd) {
3328 5280 case HPSA_INQUIRY:
3329 5281 /* are we trying to read a vital product page */
3330 if (page_code != 0) {
5282 if (page_code & VPD_PAGE) {
3331 5283 c->Request.CDB[1] = 0x01;
3332 c->Request.CDB[2] = page_code;
5284 c->Request.CDB[2] = (page_code & 0xff);
3333 5285 }
3334 5286 c->Request.CDBLen = 6;
3335 5287 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3369 5321 c->Request.Type.Direction = XFER_NONE;
3370 5322 c->Request.Timeout = 0;
3371 5323 break;
5324 case HPSA_GET_RAID_MAP:
5325 c->Request.CDBLen = 12;
5326 c->Request.Type.Attribute = ATTR_SIMPLE;
5327 c->Request.Type.Direction = XFER_READ;
5328 c->Request.Timeout = 0;
5329 c->Request.CDB[0] = HPSA_CISS_READ;
5330 c->Request.CDB[1] = cmd;
5331 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5332 c->Request.CDB[7] = (size >> 16) & 0xFF;
5333 c->Request.CDB[8] = (size >> 8) & 0xFF;
5334 c->Request.CDB[9] = size & 0xFF;
5335 break;
5336 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5337 c->Request.CDBLen = 10;
5338 c->Request.Type.Attribute = ATTR_SIMPLE;
5339 c->Request.Type.Direction = XFER_READ;
5340 c->Request.Timeout = 0;
5341 c->Request.CDB[0] = BMIC_READ;
5342 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5343 c->Request.CDB[7] = (size >> 16) & 0xFF;
5344 c->Request.CDB[8] = (size >> 8) & 0xFF;
5345 break;
3372 5346 default:
3373 5347 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
3374 5348 BUG();
@@ -3562,7 +5536,8 @@ static inline void finish_cmd(struct CommandList *c)
3562 5536 spin_unlock_irqrestore(&h->lock, flags);
3563 5537
3564 5538 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
3565 if (likely(c->cmd_type == CMD_SCSI))
5539 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5540 || c->cmd_type == CMD_IOACCEL2))
3566 5541 complete_scsi_command(c);
3567 5542 else if (c->cmd_type == CMD_IOCTL_PEND)
3568 5543 complete(c->waiting);
@@ -4169,21 +6144,24 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
4169 6144 goto default_int_mode;
4170 6145 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
4171 6146 dev_info(&h->pdev->dev, "MSIX\n");
6147 h->msix_vector = MAX_REPLY_QUEUES;
4172 6148 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
4173 MAX_REPLY_QUEUES);
6149 h->msix_vector);
4174 if (!err) {
4175 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4176 h->intr[i] = hpsa_msix_entries[i].vector;
4177 h->msix_vector = 1;
4178 return;
4179 }
4180 6150 if (err > 0) {
4181 6151 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
4182 6152 "available\n", err);
4183 goto default_int_mode;
6153 h->msix_vector = err;
6154 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6155 h->msix_vector);
6156 }
6157 if (!err) {
6158 for (i = 0; i < h->msix_vector; i++)
6159 h->intr[i] = hpsa_msix_entries[i].vector;
6160 return;
4184 6161 } else {
4185 6162 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
4186 6163 err);
6164 h->msix_vector = 0;
4187 6165 goto default_int_mode;
4188 6166 }
4189 6167 }
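The retry added above follows the old pci_enable_msix() contract: a positive return value means only that many vectors are available, so the driver records it in h->msix_vector and asks again with the smaller count. A runnable sketch of that contract with a stubbed enable function (fake_enable_msix and its vector count are invented for illustration):

#include <stdio.h>

/* Stub with the same contract as the old pci_enable_msix(): 0 on success,
 * a positive count when only that many vectors exist, negative on error.
 * This fake hardware offers 4 vectors. */
static int fake_enable_msix(int nvec)
{
	const int available = 4;

	return nvec <= available ? 0 : available;
}

int main(void)
{
	int nvec = 16;			/* the driver asks for MAX_REPLY_QUEUES */
	int err = fake_enable_msix(nvec);

	if (err > 0) {			/* fewer vectors than requested */
		printf("only %d MSI-X vectors available\n", err);
		nvec = err;
		err = fake_enable_msix(nvec);	/* retry with that count */
	}
	if (!err)
		printf("enabled %d vectors\n", nvec);
	else
		printf("MSI-X init failed %d\n", err);
	return 0;
}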
@@ -4336,6 +6314,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
4336 6314 hpsa_get_max_perf_mode_cmds(h);
4337 6315 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
4338 6316 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6317 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
4339 6318 /*
4340 6319 * Limit in-command s/g elements to 32 to save DMA-able memory.
4341 6320 * However, the spec says if 0, use 31.
@@ -4352,6 +6331,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)
4352 6331
4353 6332 /* Find out what task management functions are supported and cache */
4354 6333 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6334 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6335 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6336 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6337 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
4355 6338 }
4356 6339
4357 6340 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
4390 6373 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
4391 6374 }
4392 6375
6376static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6377{
6378 int i;
6379 u32 doorbell_value;
6380 unsigned long flags;
6381 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6382 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6383 spin_lock_irqsave(&h->lock, flags);
6384 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6385 spin_unlock_irqrestore(&h->lock, flags);
6386 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6387 break;
6388 /* delay and try again */
6389 msleep(20);
6390 }
6391}
6392
4393 6393 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
4394 6394 {
4395 6395 int i;
@@ -4420,18 +6420,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
4420 6420 return -ENOTSUPP;
4421 6421
4422 6422 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6423
4423 6424 /* Update the field, and then ring the doorbell */
4424 6425 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6426 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
4425 6427 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
4426 6428 hpsa_wait_for_mode_change_ack(h);
4427 6429 print_cfg_table(&h->pdev->dev, h->cfgtable);
4428 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
4429 dev_warn(&h->pdev->dev,
4430 "unable to get board into simple mode\n");
4431 return -ENODEV;
4432 }
6430 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6431 goto error;
4433 6432 h->transMethod = CFGTBL_Trans_Simple;
4434 6433 return 0;
6434error:
6435 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6436 return -ENODEV;
4435 6437 }
4436 6438
4437 6439 static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
4577 6579 pci_free_consistent(h->pdev,
4578 6580 h->nr_cmds * sizeof(struct CommandList),
4579 6581 h->cmd_pool, h->cmd_pool_dhandle);
6582 if (h->ioaccel2_cmd_pool)
6583 pci_free_consistent(h->pdev,
6584 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6585 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
4580 6586 if (h->errinfo_pool)
4581 6587 pci_free_consistent(h->pdev,
4582 6588 h->nr_cmds * sizeof(struct ErrorInfo),
4583 6589 h->errinfo_pool,
4584 6590 h->errinfo_pool_dhandle);
6591 if (h->ioaccel_cmd_pool)
6592 pci_free_consistent(h->pdev,
6593 h->nr_cmds * sizeof(struct io_accel1_cmd),
6594 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
4585 6595 }
4586 6596
4587 6597 static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@ static int hpsa_request_irq(struct ctlr_info *h,
4597 6607 for (i = 0; i < MAX_REPLY_QUEUES; i++)
4598 6608 h->q[i] = (u8) i;
4599 6609
4600 if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
6610 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
4601 6611 /* If performant mode and MSI-X, use multiple reply queues */
4602 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6612 for (i = 0; i < h->msix_vector; i++)
4603 6613 rc = request_irq(h->intr[i], msixhandler,
4604 6614 0, h->devname,
4605 6615 &h->q[i]);
4606 6616 } else {
4607 6617 /* Use single reply pool */
4608 if (h->msix_vector || h->msi_vector) {
6618 if (h->msix_vector > 0 || h->msi_vector) {
4609 6619 rc = request_irq(h->intr[h->intr_mode],
4610 6620 msixhandler, 0, h->devname,
4611 6621 &h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@ static void free_irqs(struct ctlr_info *h)
4658 6668 return;
4659 6669 }
4660 6670
4661 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6671 for (i = 0; i < h->msix_vector; i++)
4662 6672 free_irq(h->intr[i], &h->q[i]);
4663 6673 }
4664 6674
@@ -4681,6 +6691,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4681 6691 hpsa_free_irqs_and_disable_msix(h);
4682 6692 hpsa_free_sg_chain_blocks(h);
4683 6693 hpsa_free_cmd_pool(h);
6694 kfree(h->ioaccel1_blockFetchTable);
4684 6695 kfree(h->blockFetchTable);
4685 6696 pci_free_consistent(h->pdev, h->reply_pool_size,
4686 6697 h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@ static void detect_controller_lockup(struct ctlr_info *h)
4760 6771 h->last_heartbeat_timestamp = now;
4761 6772 }
4762 6773
6774static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6775{
6776 int i;
6777 char *event_type;
6778
6779 /* Clear the driver-requested rescan flag */
6780 h->drv_req_rescan = 0;
6781
6782 /* Ask the controller to clear the events we're handling. */
6783 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6784 | CFGTBL_Trans_io_accel2)) &&
6785 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6786 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6787
6788 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6789 event_type = "state change";
6790 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6791 event_type = "configuration change";
6792 /* Stop sending new RAID offload reqs via the IO accelerator */
6793 scsi_block_requests(h->scsi_host);
6794 for (i = 0; i < h->ndevices; i++)
6795 h->dev[i]->offload_enabled = 0;
6796 hpsa_drain_accel_commands(h);
6797 /* Report and acknowledge the accelerator path events being handled */
6798 dev_warn(&h->pdev->dev,
6799 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6800 h->events, event_type);
6801 writel(h->events, &(h->cfgtable->clear_event_notify));
6802 /* Set the "clear event notify field update" bit 6 */
6803 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6804 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6805 hpsa_wait_for_clear_event_notify_ack(h);
6806 scsi_unblock_requests(h->scsi_host);
6807 } else {
6808 /* Acknowledge controller notification events. */
6809 writel(h->events, &(h->cfgtable->clear_event_notify));
6810 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6811 hpsa_wait_for_clear_event_notify_ack(h);
6812#if 0
6813 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6814 hpsa_wait_for_mode_change_ack(h);
6815#endif
6816 }
6817 return;
6818}
6819
6820/* Check a register on the controller to see if there are configuration
6821 * changes (added/changed/removed logical drives, etc.) which mean that
6822 * we should rescan the controller for devices.
6823 * Also check flag for driver-initiated rescan.
6824 */
6825static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6826{
6827 if (h->drv_req_rescan)
6828 return 1;
6829
6830 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6831 return 0;
6832
6833 h->events = readl(&(h->cfgtable->event_notify));
6834 return h->events & RESCAN_REQUIRED_EVENT_BITS;
6835}
6836
6837/*
6838 * Check if any of the offline devices have become ready
6839 */
6840static int hpsa_offline_devices_ready(struct ctlr_info *h)
6841{
6842 unsigned long flags;
6843 struct offline_device_entry *d;
6844 struct list_head *this, *tmp;
6845
6846 spin_lock_irqsave(&h->offline_device_lock, flags);
6847 list_for_each_safe(this, tmp, &h->offline_device_list) {
6848 d = list_entry(this, struct offline_device_entry,
6849 offline_list);
6850 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6851 if (!hpsa_volume_offline(h, d->scsi3addr))
6852 return 1;
6853 spin_lock_irqsave(&h->offline_device_lock, flags);
6854 }
6855 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6856 return 0;
6857}
6858
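Note the shape of hpsa_offline_devices_ready(): the spinlock is dropped around hpsa_volume_offline() because that call can sleep, and the walk simply returns on the first ready device instead of continuing after relocking. A user-space sketch of the same drop-the-lock-around-a-slow-call pattern, with a pthread mutex standing in for the spinlock (like the driver code, it assumes entries are not freed while the lock is dropped):

#include <pthread.h>
#include <stdbool.h>

struct entry { struct entry *next; int ready; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;	/* list of devices to poll */

/* Hypothetical slow check; must not be called with list_lock held. */
static bool slow_check(struct entry *e) { return e->ready; }

static bool any_ready(void)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = head; e; e = e->next) {
		pthread_mutex_unlock(&list_lock);	/* drop for slow call */
		if (slow_check(e))
			return true;			/* first hit wins */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	return false;
}

int main(void) { return any_ready() ? 0 : 1; }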
6859
4763 6860 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
4764 6861 {
4765 6862 unsigned long flags;
@@ -4768,6 +6865,15 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
4768 6865 detect_controller_lockup(h);
4769 6866 if (h->lockup_detected)
4770 6867 return;
6868
6869 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6870 scsi_host_get(h->scsi_host);
6871 h->drv_req_rescan = 0;
6872 hpsa_ack_ctlr_events(h);
6873 hpsa_scan_start(h->scsi_host);
6874 scsi_host_put(h->scsi_host);
6875 }
6876
4771 6877 spin_lock_irqsave(&h->lock, flags);
4772 6878 if (h->remove_in_progress) {
4773 6879 spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@ reinit_after_soft_reset:
4807 6913 * the 5 lower bits of the address are used by the hardware and by
4808 6914 * the driver. See comments in hpsa.h for more info.
4809 6915 */
4810 #define COMMANDLIST_ALIGNMENT 32
6916 #define COMMANDLIST_ALIGNMENT 128
4811 6917 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
4812 6918 h = kzalloc(sizeof(*h), GFP_KERNEL);
4813 6919 if (!h)
@@ -4817,7 +6923,9 @@ reinit_after_soft_reset:
4817 6923 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
4818 6924 INIT_LIST_HEAD(&h->cmpQ);
4819 6925 INIT_LIST_HEAD(&h->reqQ);
6926 INIT_LIST_HEAD(&h->offline_device_list);
4820 6927 spin_lock_init(&h->lock);
6928 spin_lock_init(&h->offline_device_lock);
4821 6929 spin_lock_init(&h->scan_lock);
4822 6930 spin_lock_init(&h->passthru_count_lock);
4823 6931 rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@ reinit_after_soft_reset:
4859 6967
4860 6968 pci_set_drvdata(pdev, h);
4861 6969 h->ndevices = 0;
6970 h->hba_mode_enabled = 0;
4862 6971 h->scsi_host = NULL;
4863 6972 spin_lock_init(&h->devlock);
4864 6973 hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@ reinit_after_soft_reset:
4918 7027 goto reinit_after_soft_reset;
4919 7028 }
4920 7029
7030 /* Enable Accelerated IO path at driver layer */
7031 h->acciopath_status = 1;
7032
7033 h->drv_req_rescan = 0;
7034
4921 7035 /* Turn the interrupts on so we can service requests */
4922 7036 h->access.set_intr_mask(h, HPSA_INTR_ON);
4923 7037
@@ -5034,6 +7148,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
5034 7148 h->reply_pool, h->reply_pool_dhandle);
5035 7149 kfree(h->cmd_pool_bits);
5036 7150 kfree(h->blockFetchTable);
7151 kfree(h->ioaccel1_blockFetchTable);
7152 kfree(h->ioaccel2_blockFetchTable);
5037 7153 kfree(h->hba_inquiry_data);
5038 7154 pci_disable_device(pdev);
5039 7155 pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@ static struct pci_driver hpsa_pci_driver = {
5074 7190 * bits of the command address.
5075 7191 */
5076 7192 static void calc_bucket_map(int bucket[], int num_buckets,
5077 int nsgs, int *bucket_map)
7193 int nsgs, int min_blocks, int *bucket_map)
5078 7194 {
5079 7195 int i, j, b, size;
5080 7196
5081 /* even a command with 0 SGs requires 4 blocks */
5082 #define MINIMUM_TRANSFER_BLOCKS 4
5083 #define NUM_BUCKETS 8
5084 7197 /* Note, bucket_map must have nsgs+1 entries. */
5085 7198 for (i = 0; i <= nsgs; i++) {
5086 7199 /* Compute size of a command with i SG entries */
5087 size = i + MINIMUM_TRANSFER_BLOCKS;
7200 size = i + min_blocks;
5088 7201 b = num_buckets; /* Assume the biggest bucket */
5089 7202 /* Find the bucket that is just big enough */
5090 for (j = 0; j < 8; j++) {
7203 for (j = 0; j < num_buckets; j++) {
5091 7204 if (bucket[j] >= size) {
5092 7205 b = j;
5093 7206 break;
@@ -5098,10 +7211,16 @@ static void calc_bucket_map(int bucket[], int num_buckets,
5098 7211 }
5099 7212 }
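calc_bucket_map() maps every possible SG-entry count to the index of the smallest bucket whose size fits the resulting command. A runnable sketch using the driver's performant-mode bucket sizes, with SG_ENTRIES_IN_CMD assumed to be 32 purely for illustration:

#include <stdio.h>

/* Same algorithm as the driver's calc_bucket_map(): for each SG count
 * 0..nsgs, record the index of the smallest bucket >= count + min_blocks. */
static void calc_bucket_map(int bucket[], int num_buckets,
			    int nsgs, int min_blocks, int *bucket_map)
{
	for (int i = 0; i <= nsgs; i++) {
		int size = i + min_blocks;
		int b = num_buckets;	/* assume the biggest bucket */

		for (int j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	/* bft[] as in the driver, with SG_ENTRIES_IN_CMD taken as 32 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 32 + 4};
	int map[33];

	calc_bucket_map(bft, 8, 32, 4, map);
	printf("0 SGs -> bucket %d (size %d)\n", map[0], bft[map[0]]);
	printf("7 SGs -> bucket %d (size %d)\n", map[7], bft[map[7]]);
	return 0;
}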
5100 7213
5101 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
7214 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
5102 7215 {
5103 7216 int i;
5104 7217 unsigned long register_value;
7218 unsigned long transMethod = CFGTBL_Trans_Performant |
7219 (trans_support & CFGTBL_Trans_use_short_tags) |
7220 CFGTBL_Trans_enable_directed_msix |
7221 (trans_support & (CFGTBL_Trans_io_accel1 |
7222 CFGTBL_Trans_io_accel2));
7223 struct access_method access = SA5_performant_access;
5105 7224
5106 7225 /* This is a bit complicated. There are 8 registers on
5107 7226 * the controller which we write to, to tell it 8 different
@@ -5121,6 +7240,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
5121 * sizes for small commands, and fewer sizes for larger commands. 7240 * sizes for small commands, and fewer sizes for larger commands.
5122 */ 7241 */
5123 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 7242 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7243#define MIN_IOACCEL2_BFT_ENTRY 5
7244#define HPSA_IOACCEL2_HEADER_SZ 4
7245 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7246 13, 14, 15, 16, 17, 18, 19,
7247 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7248 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7249 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7250 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7251 16 * MIN_IOACCEL2_BFT_ENTRY);
7252 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
5124 7253 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
5125 7254 /* 5 = 1 s/g entry or 4k
5126 7255 * 6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
5133 7262
5134 7263 bft[7] = SG_ENTRIES_IN_CMD + 4;
5135 7264 calc_bucket_map(bft, ARRAY_SIZE(bft),
5136 SG_ENTRIES_IN_CMD, h->blockFetchTable);
7265 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
5137 7266 for (i = 0; i < 8; i++)
5138 7267 writel(bft[i], &h->transtable->BlockFetch[i]);
5139 7268
@@ -5150,9 +7279,22 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
5150 7279 &h->transtable->RepQAddr[i].lower);
5151 7280 }
5152 7281
5153 writel(CFGTBL_Trans_Performant | use_short_tags |
5154 CFGTBL_Trans_enable_directed_msix,
5155 &(h->cfgtable->HostWrite.TransportRequest));
7282 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7283 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7284 /*
7285 * enable outbound interrupt coalescing in accelerator mode;
7286 */
7287 if (trans_support & CFGTBL_Trans_io_accel1) {
7288 access = SA5_ioaccel_mode1_access;
7289 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7290 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7291 } else {
7292 if (trans_support & CFGTBL_Trans_io_accel2) {
7293 access = SA5_ioaccel_mode2_access;
7294 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7295 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7296 }
7297 }
5156 7298 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
5157 7299 hpsa_wait_for_mode_change_ack(h);
5158 7300 register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
5162 7304 return;
5163 7305 }
5164 7306 /* Change the access methods to the performant access methods */
5165 h->access = SA5_performant_access;
5166 h->transMethod = CFGTBL_Trans_Performant;
7307 h->access = access;
7308 h->transMethod = transMethod;
7309
7310 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7311 (trans_support & CFGTBL_Trans_io_accel2)))
7312 return;
7313
7314 if (trans_support & CFGTBL_Trans_io_accel1) {
7315 /* Set up I/O accelerator mode */
7316 for (i = 0; i < h->nreply_queues; i++) {
7317 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7318 h->reply_queue[i].current_entry =
7319 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7320 }
7321 bft[7] = h->ioaccel_maxsg + 8;
7322 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7323 h->ioaccel1_blockFetchTable);
7324
7325 /* initialize all reply queue entries to unused */
7326 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
7327 h->reply_pool_size);
7328
7329 /* set all the constant fields in the accelerator command
7330 * frames once at init time to save CPU cycles later.
7331 */
7332 for (i = 0; i < h->nr_cmds; i++) {
7333 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7334
7335 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7336 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7337 (i * sizeof(struct ErrorInfo)));
7338 cp->err_info_len = sizeof(struct ErrorInfo);
7339 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7340 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7341 cp->timeout_sec = 0;
7342 cp->ReplyQueue = 0;
7343 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
7344 DIRECT_LOOKUP_BIT;
7345 cp->Tag.upper = 0;
7346 cp->host_addr.lower =
7347 (u32) (h->ioaccel_cmd_pool_dhandle +
7348 (i * sizeof(struct io_accel1_cmd)));
7349 cp->host_addr.upper = 0;
7350 }
7351 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7352 u64 cfg_offset, cfg_base_addr_index;
7353 u32 bft2_offset, cfg_base_addr;
7354 int rc;
7355
7356 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7357 &cfg_base_addr_index, &cfg_offset);
7358 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7359 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7360 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7361 4, h->ioaccel2_blockFetchTable);
7362 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7363 BUILD_BUG_ON(offsetof(struct CfgTable,
7364 io_accel_request_size_offset) != 0xb8);
7365 h->ioaccel2_bft2_regs =
7366 remap_pci_mem(pci_resource_start(h->pdev,
7367 cfg_base_addr_index) +
7368 cfg_offset + bft2_offset,
7369 ARRAY_SIZE(bft2) *
7370 sizeof(*h->ioaccel2_bft2_regs));
7371 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7372 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7373 }
7374 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7375 hpsa_wait_for_mode_change_ack(h);
7376}
7377
7378static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7379{
7380 h->ioaccel_maxsg =
7381 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7382 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7383 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7384
7385 /* Command structures must be aligned on a 128-byte boundary
7386 * because the 7 lower bits of the address are used by the
7387 * hardware.
7388 */
7389#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
7390 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7391 IOACCEL1_COMMANDLIST_ALIGNMENT);
7392 h->ioaccel_cmd_pool =
7393 pci_alloc_consistent(h->pdev,
7394 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7395 &(h->ioaccel_cmd_pool_dhandle));
7396
7397 h->ioaccel1_blockFetchTable =
7398 kmalloc(((h->ioaccel_maxsg + 1) *
7399 sizeof(u32)), GFP_KERNEL);
7400
7401 if ((h->ioaccel_cmd_pool == NULL) ||
7402 (h->ioaccel1_blockFetchTable == NULL))
7403 goto clean_up;
7404
7405 memset(h->ioaccel_cmd_pool, 0,
7406 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7407 return 0;
7408
7409clean_up:
7410 if (h->ioaccel_cmd_pool)
7411 pci_free_consistent(h->pdev,
7412 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7413 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7414 kfree(h->ioaccel1_blockFetchTable);
7415 return 1;
7416}
7417
7418static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7419{
7420 /* Allocate ioaccel2 mode command blocks and block fetch table */
7421
7422 h->ioaccel_maxsg =
7423 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7424 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7425 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7426
7427#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
7428 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7429 IOACCEL2_COMMANDLIST_ALIGNMENT);
7430 h->ioaccel2_cmd_pool =
7431 pci_alloc_consistent(h->pdev,
7432 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7433 &(h->ioaccel2_cmd_pool_dhandle));
7434
7435 h->ioaccel2_blockFetchTable =
7436 kmalloc(((h->ioaccel_maxsg + 1) *
7437 sizeof(u32)), GFP_KERNEL);
7438
7439 if ((h->ioaccel2_cmd_pool == NULL) ||
7440 (h->ioaccel2_blockFetchTable == NULL))
7441 goto clean_up;
7442
7443 memset(h->ioaccel2_cmd_pool, 0,
7444 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7445 return 0;
7446
7447clean_up:
7448 if (h->ioaccel2_cmd_pool)
7449 pci_free_consistent(h->pdev,
7450 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7451 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7452 kfree(h->ioaccel2_blockFetchTable);
7453 return 1;
5167} 7454}
5168 7455
5169static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 7456static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
5170{ 7457{
5171 u32 trans_support; 7458 u32 trans_support;
7459 unsigned long transMethod = CFGTBL_Trans_Performant |
7460 CFGTBL_Trans_use_short_tags;
5172 int i; 7461 int i;
5173 7462
5174 if (hpsa_simple_mode) 7463 if (hpsa_simple_mode)
5175 return; 7464 return;
5176 7465
7466 /* Check for I/O accelerator mode support */
7467 if (trans_support & CFGTBL_Trans_io_accel1) {
7468 transMethod |= CFGTBL_Trans_io_accel1 |
7469 CFGTBL_Trans_enable_directed_msix;
7470 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7471 goto clean_up;
7472 } else {
7473 if (trans_support & CFGTBL_Trans_io_accel2) {
7474 transMethod |= CFGTBL_Trans_io_accel2 |
7475 CFGTBL_Trans_enable_directed_msix;
7476 if (ioaccel2_alloc_cmds_and_bft(h))
7477 goto clean_up;
7478 }
7479 }
7480
7481 /* TODO: check that h->nreply_queues is set correctly below; note that
trans_support is tested above before this read fills it in */
5177 7482 trans_support = readl(&(h->cfgtable->TransportSupport));
5178 7483 if (!(trans_support & PERFORMANT_MODE))
5179 7484 return;
5180 7485
5181 h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
7486 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
5182 7487 hpsa_get_max_perf_mode_cmds(h);
5183 7488 /* Performant mode ring buffer and supporting data structures */
5184 7489 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
5200 7505 || (h->blockFetchTable == NULL))
5201 7506 goto clean_up;
5202 7507
5203 hpsa_enter_performant_mode(h,
5204 trans_support & CFGTBL_Trans_use_short_tags);
5205
7508 hpsa_enter_performant_mode(h, trans_support);
5206 7509 return;
5207 7510
5208 7511 clean_up:
@@ -5212,6 +7515,31 @@ clean_up:
5212 kfree(h->blockFetchTable); 7515 kfree(h->blockFetchTable);
5213} 7516}
5214 7517
7518static int is_accelerated_cmd(struct CommandList *c)
7519{
7520 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7521}
7522
7523static void hpsa_drain_accel_commands(struct ctlr_info *h)
7524{
7525 struct CommandList *c = NULL;
7526 unsigned long flags;
7527 int accel_cmds_out;
7528
7529 do { /* wait for all outstanding commands to drain out */
7530 accel_cmds_out = 0;
7531 spin_lock_irqsave(&h->lock, flags);
7532 list_for_each_entry(c, &h->cmpQ, list)
7533 accel_cmds_out += is_accelerated_cmd(c);
7534 list_for_each_entry(c, &h->reqQ, list)
7535 accel_cmds_out += is_accelerated_cmd(c);
7536 spin_unlock_irqrestore(&h->lock, flags);
7537 if (accel_cmds_out <= 0)
7538 break;
7539 msleep(100);
7540 } while (1);
7541}
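
hpsa_drain_accel_commands() is the standard poll-and-sleep drain idiom: count matching commands on the driver's queues while holding the lock, drop the lock, and only then sleep before retrying, since msleep() must not be called with a spinlock held. A sketch under those assumptions (demo_* names are hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

struct demo_cmd {
	int cmd_type;
	struct list_head list;
};

static void demo_drain(spinlock_t *lock, struct list_head *queue,
		       int type_to_drain)
{
	struct demo_cmd *c;
	unsigned long flags;
	int outstanding;

	do {
		outstanding = 0;
		spin_lock_irqsave(lock, flags);
		list_for_each_entry(c, queue, list)
			outstanding += (c->cmd_type == type_to_drain);
		spin_unlock_irqrestore(lock, flags);
		if (!outstanding)
			break;
		msleep(100);	/* sleep only after dropping the lock */
	} while (1);
}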
7542
5215/* 7543/*
5216 * This is it. Register the PCI driver information for the cards we control 7544 * This is it. Register the PCI driver information for the cards we control
5217 * the OS will call our registered routines when it finds one of our cards. 7545 * the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@ static void __exit hpsa_cleanup(void)
5226 pci_unregister_driver(&hpsa_pci_driver); 7554 pci_unregister_driver(&hpsa_pci_driver);
5227} 7555}
5228 7556
7557static void __attribute__((unused)) verify_offsets(void)
7558{
7559#define VERIFY_OFFSET(member, offset) \
7560 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7561
7562 VERIFY_OFFSET(structure_size, 0);
7563 VERIFY_OFFSET(volume_blk_size, 4);
7564 VERIFY_OFFSET(volume_blk_cnt, 8);
7565 VERIFY_OFFSET(phys_blk_shift, 16);
7566 VERIFY_OFFSET(parity_rotation_shift, 17);
7567 VERIFY_OFFSET(strip_size, 18);
7568 VERIFY_OFFSET(disk_starting_blk, 20);
7569 VERIFY_OFFSET(disk_blk_cnt, 28);
7570 VERIFY_OFFSET(data_disks_per_row, 36);
7571 VERIFY_OFFSET(metadata_disks_per_row, 38);
7572 VERIFY_OFFSET(row_cnt, 40);
7573 VERIFY_OFFSET(layout_map_count, 42);
7574 VERIFY_OFFSET(flags, 44);
7575 VERIFY_OFFSET(dekindex, 46);
	7576	/* VERIFY_OFFSET(reserved, 48); */
7577 VERIFY_OFFSET(data, 64);
7578
7579#undef VERIFY_OFFSET
7580
7581#define VERIFY_OFFSET(member, offset) \
7582 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7583
7584 VERIFY_OFFSET(IU_type, 0);
7585 VERIFY_OFFSET(direction, 1);
7586 VERIFY_OFFSET(reply_queue, 2);
7587 /* VERIFY_OFFSET(reserved1, 3); */
7588 VERIFY_OFFSET(scsi_nexus, 4);
7589 VERIFY_OFFSET(Tag, 8);
7590 VERIFY_OFFSET(cdb, 16);
7591 VERIFY_OFFSET(cciss_lun, 32);
7592 VERIFY_OFFSET(data_len, 40);
7593 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7594 VERIFY_OFFSET(sg_count, 45);
	7595	/* VERIFY_OFFSET(reserved3); */
7596 VERIFY_OFFSET(err_ptr, 48);
7597 VERIFY_OFFSET(err_len, 56);
	7598	/* VERIFY_OFFSET(reserved4); */
7599 VERIFY_OFFSET(sg, 64);
7600
7601#undef VERIFY_OFFSET
7602
7603#define VERIFY_OFFSET(member, offset) \
7604 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7605
7606 VERIFY_OFFSET(dev_handle, 0x00);
7607 VERIFY_OFFSET(reserved1, 0x02);
7608 VERIFY_OFFSET(function, 0x03);
7609 VERIFY_OFFSET(reserved2, 0x04);
7610 VERIFY_OFFSET(err_info, 0x0C);
7611 VERIFY_OFFSET(reserved3, 0x10);
7612 VERIFY_OFFSET(err_info_len, 0x12);
7613 VERIFY_OFFSET(reserved4, 0x13);
7614 VERIFY_OFFSET(sgl_offset, 0x14);
7615 VERIFY_OFFSET(reserved5, 0x15);
7616 VERIFY_OFFSET(transfer_len, 0x1C);
7617 VERIFY_OFFSET(reserved6, 0x20);
7618 VERIFY_OFFSET(io_flags, 0x24);
7619 VERIFY_OFFSET(reserved7, 0x26);
7620 VERIFY_OFFSET(LUN, 0x34);
7621 VERIFY_OFFSET(control, 0x3C);
7622 VERIFY_OFFSET(CDB, 0x40);
7623 VERIFY_OFFSET(reserved8, 0x50);
7624 VERIFY_OFFSET(host_context_flags, 0x60);
7625 VERIFY_OFFSET(timeout_sec, 0x62);
7626 VERIFY_OFFSET(ReplyQueue, 0x64);
7627 VERIFY_OFFSET(reserved9, 0x65);
7628 VERIFY_OFFSET(Tag, 0x68);
7629 VERIFY_OFFSET(host_addr, 0x70);
7630 VERIFY_OFFSET(CISS_LUN, 0x78);
7631 VERIFY_OFFSET(SG, 0x78 + 8);
7632#undef VERIFY_OFFSET
7633}
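
verify_offsets() never executes; its value is entirely at compile time. offsetof() is a constant expression, so BUILD_BUG_ON() fails the build the moment a member of one of these firmware-facing structures drifts from the offset the controller expects. The same idiom in miniature, on a hypothetical two-field record:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_fw_record {
	u32 magic;	/* firmware expects this at byte 0 */
	u32 reserved;	/* bytes 4-7 */
	u64 address;	/* firmware expects this at byte 8 */
} __attribute__((packed));

static void __attribute__((unused)) demo_verify_offsets(void)
{
#define DEMO_VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct demo_fw_record, member) != offset)
	DEMO_VERIFY_OFFSET(magic, 0);
	DEMO_VERIFY_OFFSET(address, 8);
#undef DEMO_VERIFY_OFFSET
}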
7634
5229module_init(hpsa_init); 7635module_init(hpsa_init);
5230module_exit(hpsa_cleanup); 7636module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c328349c83..44235a27e1b6 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -46,6 +46,15 @@ struct hpsa_scsi_dev_t {
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char raid_level; /* from inquiry page 0xC1 */ 48 unsigned char raid_level; /* from inquiry page 0xC1 */
49 unsigned char volume_offline; /* discovered via TUR or VPD */
50 u32 ioaccel_handle;
51 int offload_config; /* I/O accel RAID offload configured */
52 int offload_enabled; /* I/O accel RAID offload enabled */
53 int offload_to_mirror; /* Send next I/O accelerator RAID
54 * offload request to mirror drive
55 */
56 struct raid_map_data raid_map; /* I/O accelerator RAID map */
57
49}; 58};
50 59
51struct reply_pool { 60struct reply_pool {
@@ -55,6 +64,46 @@ struct reply_pool {
55 u32 current_entry; 64 u32 current_entry;
56}; 65};
57 66
67#pragma pack(1)
68struct bmic_controller_parameters {
69 u8 led_flags;
70 u8 enable_command_list_verification;
71 u8 backed_out_write_drives;
72 u16 stripes_for_parity;
73 u8 parity_distribution_mode_flags;
74 u16 max_driver_requests;
75 u16 elevator_trend_count;
76 u8 disable_elevator;
77 u8 force_scan_complete;
78 u8 scsi_transfer_mode;
79 u8 force_narrow;
80 u8 rebuild_priority;
81 u8 expand_priority;
82 u8 host_sdb_asic_fix;
83 u8 pdpi_burst_from_host_disabled;
84 char software_name[64];
85 char hardware_name[32];
86 u8 bridge_revision;
87 u8 snapshot_priority;
88 u32 os_specific;
89 u8 post_prompt_timeout;
90 u8 automatic_drive_slamming;
91 u8 reserved1;
92 u8 nvram_flags;
93 u8 cache_nvram_flags;
94 u8 drive_config_flags;
95 u16 reserved2;
96 u8 temp_warning_level;
97 u8 temp_shutdown_level;
98 u8 temp_condition_reset;
99 u8 max_coalesce_commands;
100 u32 max_coalesce_delay;
101 u8 orca_password[4];
102 u8 access_id[16];
103 u8 reserved[356];
104};
105#pragma pack()
106
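
The #pragma pack(1)/#pragma pack() bracket matters because bmic_controller_parameters must overlay, byte for byte, the buffer returned by the BMIC_SENSE_CONTROLLER_PARAMETERS command; any compiler-inserted padding would shift every later member. An illustration with hypothetical structures (sizes assume a typical ABI):

#include <linux/types.h>

struct demo_unpacked {
	u8  flag;
	u16 count;	/* compiler may insert 1 pad byte before this */
};			/* sizeof() is typically 4 */

#pragma pack(1)
struct demo_packed {
	u8  flag;
	u16 count;	/* packed: starts at byte 1, as on the wire */
};			/* sizeof() is exactly 3 */
#pragma pack()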
58struct ctlr_info { 107struct ctlr_info {
59 int ctlr; 108 int ctlr;
60 char devname[8]; 109 char devname[8];
@@ -80,6 +129,7 @@ struct ctlr_info {
80 unsigned int msi_vector; 129 unsigned int msi_vector;
81 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ 130 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
82 struct access_method access; 131 struct access_method access;
132 char hba_mode_enabled;
83 133
84 /* queue and queue Info */ 134 /* queue and queue Info */
85 struct list_head reqQ; 135 struct list_head reqQ;
@@ -95,6 +145,10 @@ struct ctlr_info {
95 /* pointers to command and error info pool */ 145 /* pointers to command and error info pool */
96 struct CommandList *cmd_pool; 146 struct CommandList *cmd_pool;
97 dma_addr_t cmd_pool_dhandle; 147 dma_addr_t cmd_pool_dhandle;
148 struct io_accel1_cmd *ioaccel_cmd_pool;
149 dma_addr_t ioaccel_cmd_pool_dhandle;
150 struct io_accel2_cmd *ioaccel2_cmd_pool;
151 dma_addr_t ioaccel2_cmd_pool_dhandle;
98 struct ErrorInfo *errinfo_pool; 152 struct ErrorInfo *errinfo_pool;
99 dma_addr_t errinfo_pool_dhandle; 153 dma_addr_t errinfo_pool_dhandle;
100 unsigned long *cmd_pool_bits; 154 unsigned long *cmd_pool_bits;
@@ -128,7 +182,14 @@ struct ctlr_info {
128 u8 nreply_queues; 182 u8 nreply_queues;
129 dma_addr_t reply_pool_dhandle; 183 dma_addr_t reply_pool_dhandle;
130 u32 *blockFetchTable; 184 u32 *blockFetchTable;
185 u32 *ioaccel1_blockFetchTable;
186 u32 *ioaccel2_blockFetchTable;
187 u32 *ioaccel2_bft2_regs;
131 unsigned char *hba_inquiry_data; 188 unsigned char *hba_inquiry_data;
189 u32 driver_support;
190 u32 fw_support;
191 int ioaccel_support;
192 int ioaccel_maxsg;
132 u64 last_intr_timestamp; 193 u64 last_intr_timestamp;
133 u32 last_heartbeat; 194 u32 last_heartbeat;
134 u64 last_heartbeat_timestamp; 195 u64 last_heartbeat_timestamp;
@@ -161,7 +222,35 @@ struct ctlr_info {
161#define HPSATMF_LOG_QRY_TASK (1 << 23) 222#define HPSATMF_LOG_QRY_TASK (1 << 23)
162#define HPSATMF_LOG_QRY_TSET (1 << 24) 223#define HPSATMF_LOG_QRY_TSET (1 << 24)
163#define HPSATMF_LOG_QRY_ASYNC (1 << 25) 224#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
225 u32 events;
226#define CTLR_STATE_CHANGE_EVENT (1 << 0)
227#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
228#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
229#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
230#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
231#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
232#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
233
234#define RESCAN_REQUIRED_EVENT_BITS \
235 (CTLR_STATE_CHANGE_EVENT | \
236 CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
237 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
238 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
239 CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
240 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
241 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
242 spinlock_t offline_device_lock;
243 struct list_head offline_device_list;
244 int acciopath_status;
245 int drv_req_rescan; /* flag for driver to request rescan event */
246 int raid_offload_debug;
164}; 247};
248
249struct offline_device_entry {
250 unsigned char scsi3addr[8];
251 struct list_head offline_list;
252};
253
165#define HPSA_ABORT_MSG 0 254#define HPSA_ABORT_MSG 0
166#define HPSA_DEVICE_RESET_MSG 1 255#define HPSA_DEVICE_RESET_MSG 1
167#define HPSA_RESET_TYPE_CONTROLLER 0x00 256#define HPSA_RESET_TYPE_CONTROLLER 0x00
@@ -242,6 +331,14 @@ struct ctlr_info {
242 331
243#define HPSA_INTR_ON 1 332#define HPSA_INTR_ON 1
244#define HPSA_INTR_OFF 0 333#define HPSA_INTR_OFF 0
334
335/*
336 * Inbound Post Queue offsets for IO Accelerator Mode 2
337 */
338#define IOACCEL2_INBOUND_POSTQ_32 0x48
339#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
340#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4
341
245/* 342/*
246 Send the command to the hardware 343 Send the command to the hardware
247*/ 344*/
@@ -254,6 +351,18 @@ static void SA5_submit_command(struct ctlr_info *h,
254 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 351 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
255} 352}
256 353
354static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
355 struct CommandList *c)
356{
357 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
358 c->Header.Tag.lower);
359 if (c->cmd_type == CMD_IOACCEL2)
360 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
361 else
362 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
363 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
364}
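
SA5_submit_command_ioaccel2() picks a doorbell by cmd_type (IOACCEL2_INBOUND_POSTQ_32 for ioaccel2 commands, SA5_REQUEST_PORT_OFFSET otherwise), then issues a throwaway readl(). That read matters: PCI memory writes are posted, and reading any register on the device forces them out. A generic sketch of the idiom (demo_ring_doorbell and its parameters are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

static void demo_ring_doorbell(void __iomem *vaddr, u32 busaddr,
			       unsigned long doorbell_off,
			       unsigned long flush_off)
{
	writel(busaddr, vaddr + doorbell_off);
	/*
	 * Posted-write flush: the value read is deliberately discarded;
	 * the read only guarantees the write reached the adapter.
	 */
	(void) readl(vaddr + flush_off);
}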
365
257/* 366/*
258 * This card is the opposite of the other cards. 367 * This card is the opposite of the other cards.
259 * 0 turns interrupts on... 368 * 0 turns interrupts on...
@@ -387,6 +496,50 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
387 return register_value & SA5_OUTDB_STATUS_PERF_BIT; 496 return register_value & SA5_OUTDB_STATUS_PERF_BIT;
388} 497}
389 498
499#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
500
501static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
502{
503 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
504
505 return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
506 true : false;
507}
508
509#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
510#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
511#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
512#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
513
514static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
515{
516 u64 register_value;
517 struct reply_pool *rq = &h->reply_queue[q];
518 unsigned long flags;
519
520 BUG_ON(q >= h->nreply_queues);
521
522 register_value = rq->head[rq->current_entry];
523 if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
524 rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
525 if (++rq->current_entry == rq->size)
526 rq->current_entry = 0;
527 /*
528 * @todo
529 *
530 * Don't really need to write the new index after each command,
531 * but with current driver design this is easiest.
532 */
533 wmb();
534 writel((q << 24) | rq->current_entry, h->vaddr +
535 IOACCEL_MODE1_CONSUMER_INDEX);
536 spin_lock_irqsave(&h->lock, flags);
537 h->commands_outstanding--;
538 spin_unlock_irqrestore(&h->lock, flags);
539 }
540 return (unsigned long) register_value;
541}
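
SA5_ioaccel_mode1_completed() consumes a reply ring whose empty slots hold an all-ones sentinel: the adapter overwrites a slot on completion, and the host re-arms the slot, advances its consumer position, and (in the driver) writes that index back to the adapter. The core of the pattern, minus the locking and the consumer-index register write (demo_* names are hypothetical):

#include <linux/types.h>

#define DEMO_SLOT_UNUSED 0xFFFFFFFFFFFFFFFFULL

struct demo_ring {
	u64 *head;		/* ring memory shared with the adapter */
	u32 size;		/* number of slots */
	u32 current_entry;	/* host's consumer position */
};

/* Returns the completed value, or DEMO_SLOT_UNUSED if the ring is empty. */
static u64 demo_consume(struct demo_ring *rq)
{
	u64 v = rq->head[rq->current_entry];

	if (v == DEMO_SLOT_UNUSED)
		return v;
	rq->head[rq->current_entry] = DEMO_SLOT_UNUSED;	/* re-arm the slot */
	if (++rq->current_entry == rq->size)
		rq->current_entry = 0;			/* wrap around */
	return v;
}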
542
390static struct access_method SA5_access = { 543static struct access_method SA5_access = {
391 SA5_submit_command, 544 SA5_submit_command,
392 SA5_intr_mask, 545 SA5_intr_mask,
@@ -395,6 +548,22 @@ static struct access_method SA5_access = {
395 SA5_completed, 548 SA5_completed,
396}; 549};
397 550
551static struct access_method SA5_ioaccel_mode1_access = {
552 SA5_submit_command,
553 SA5_performant_intr_mask,
554 SA5_fifo_full,
555 SA5_ioaccel_mode1_intr_pending,
556 SA5_ioaccel_mode1_completed,
557};
558
559static struct access_method SA5_ioaccel_mode2_access = {
560 SA5_submit_command_ioaccel2,
561 SA5_performant_intr_mask,
562 SA5_fifo_full,
563 SA5_performant_intr_pending,
564 SA5_performant_completed,
565};
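
The access_method tables are a plain ops-table: the rest of the driver calls through the controller's table, so adding ioaccel mode 1 and mode 2 only required new tables that mix mode-specific entries (SA5_submit_command_ioaccel2, SA5_ioaccel_mode1_completed) with the shared performant ones. The shape of the pattern, reduced to two operations with hypothetical names:

struct demo_access_method {
	void (*submit_command)(void *h, void *c);
	int (*intr_pending)(void *h);
};

static void demo_submit_a(void *h, void *c)
{
	/* ring the mode-A doorbell */
}

static int demo_pending_a(void *h)
{
	return 0;	/* poll a mode-A status register */
}

static struct demo_access_method demo_mode_a_access = {
	demo_submit_a,		/* positional init, as in the tables above */
	demo_pending_a,
};

/* Switching modes is just repointing the table the controller uses. */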
566
398static struct access_method SA5_performant_access = { 567static struct access_method SA5_performant_access = {
399 SA5_submit_command, 568 SA5_submit_command,
400 SA5_performant_intr_mask, 569 SA5_performant_intr_mask,
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index bfc8c4ea66f8..b5cc7052339f 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
25#define SENSEINFOBYTES 32 /* may vary between hbas */ 25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ 26#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
27#define HPSA_SG_CHAIN 0x80000000 27#define HPSA_SG_CHAIN 0x80000000
28#define HPSA_SG_LAST 0x40000000
28#define MAXREPLYQS 256 29#define MAXREPLYQS 256
29 30
30/* Command Status value */ 31/* Command Status value */
@@ -41,6 +42,8 @@
41#define CMD_UNSOLICITED_ABORT 0x000A 42#define CMD_UNSOLICITED_ABORT 0x000A
42#define CMD_TIMEOUT 0x000B 43#define CMD_TIMEOUT 0x000B
43#define CMD_UNABORTABLE 0x000C 44#define CMD_UNABORTABLE 0x000C
45#define CMD_IOACCEL_DISABLED 0x000E
46
44 47
45/* Unit Attentions ASC's as defined for the MSA2012sa */ 48/* Unit Attentions ASC's as defined for the MSA2012sa */
46#define POWER_OR_RESET 0x29 49#define POWER_OR_RESET 0x29
@@ -79,8 +82,9 @@
79#define ATTR_ACA 0x07 82#define ATTR_ACA 0x07
80 83
81/* cdb type */ 84/* cdb type */
82#define TYPE_CMD 0x00 85#define TYPE_CMD 0x00
83#define TYPE_MSG 0x01 86#define TYPE_MSG 0x01
87#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */
84 88
85/* Message Types */ 89/* Message Types */
86#define HPSA_TASK_MANAGEMENT 0x00 90#define HPSA_TASK_MANAGEMENT 0x00
@@ -125,9 +129,12 @@
125#define CFGTBL_AccCmds 0x00000001l 129#define CFGTBL_AccCmds 0x00000001l
126#define DOORBELL_CTLR_RESET 0x00000004l 130#define DOORBELL_CTLR_RESET 0x00000004l
127#define DOORBELL_CTLR_RESET2 0x00000020l 131#define DOORBELL_CTLR_RESET2 0x00000020l
132#define DOORBELL_CLEAR_EVENTS 0x00000040l
128 133
129#define CFGTBL_Trans_Simple 0x00000002l 134#define CFGTBL_Trans_Simple 0x00000002l
130#define CFGTBL_Trans_Performant 0x00000004l 135#define CFGTBL_Trans_Performant 0x00000004l
136#define CFGTBL_Trans_io_accel1 0x00000080l
137#define CFGTBL_Trans_io_accel2 0x00000100l
131#define CFGTBL_Trans_use_short_tags 0x20000000l 138#define CFGTBL_Trans_use_short_tags 0x20000000l
132#define CFGTBL_Trans_enable_directed_msix (1 << 30) 139#define CFGTBL_Trans_enable_directed_msix (1 << 30)
133 140
@@ -135,6 +142,28 @@
135#define CFGTBL_BusType_Ultra3 0x00000002l 142#define CFGTBL_BusType_Ultra3 0x00000002l
136#define CFGTBL_BusType_Fibre1G 0x00000100l 143#define CFGTBL_BusType_Fibre1G 0x00000100l
137#define CFGTBL_BusType_Fibre2G 0x00000200l 144#define CFGTBL_BusType_Fibre2G 0x00000200l
145
146/* VPD Inquiry types */
147#define HPSA_VPD_SUPPORTED_PAGES 0x00
148#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
149#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
150#define HPSA_VPD_LV_STATUS 0xC3
151#define HPSA_VPD_HEADER_SZ 4
152
153/* Logical volume states */
154#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1
155#define HPSA_LV_OK 0x0
156#define HPSA_LV_UNDERGOING_ERASE 0x0F
157#define HPSA_LV_UNDERGOING_RPI 0x12
158#define HPSA_LV_PENDING_RPI 0x13
159#define HPSA_LV_ENCRYPTED_NO_KEY 0x14
160#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
161#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16
162#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
163#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
164#define HPSA_LV_PENDING_ENCRYPTION 0x19
165#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A
166
138struct vals32 { 167struct vals32 {
139 u32 lower; 168 u32 lower;
140 u32 upper; 169 u32 upper;
@@ -162,9 +191,50 @@ struct InquiryData {
162 191
163#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ 192#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
164#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ 193#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
194#define HPSA_REPORT_PHYS_EXTENDED 0x02
195#define HPSA_CISS_READ 0xc0 /* CISS Read */
196#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */
197
198#define RAID_MAP_MAX_ENTRIES 256
199
200struct raid_map_disk_data {
201 u32 ioaccel_handle; /**< Handle to access this disk via the
202 * I/O accelerator */
203 u8 xor_mult[2]; /**< XOR multipliers for this position,
204 * valid for data disks only */
205 u8 reserved[2];
206};
207
208struct raid_map_data {
209 u32 structure_size; /* Size of entire structure in bytes */
210 u32 volume_blk_size; /* bytes / block in the volume */
211 u64 volume_blk_cnt; /* logical blocks on the volume */
212 u8 phys_blk_shift; /* Shift factor to convert between
213 * units of logical blocks and physical
214 * disk blocks */
215 u8 parity_rotation_shift; /* Shift factor to convert between units
216 * of logical stripes and physical
217 * stripes */
218 u16 strip_size; /* blocks used on each disk / stripe */
219 u64 disk_starting_blk; /* First disk block used in volume */
220 u64 disk_blk_cnt; /* disk blocks used by volume / disk */
221 u16 data_disks_per_row; /* data disk entries / row in the map */
222 u16 metadata_disks_per_row; /* mirror/parity disk entries / row
223 * in the map */
224 u16 row_cnt; /* rows in each layout map */
225 u16 layout_map_count; /* layout maps (1 map per mirror/parity
226 * group) */
227 u16 flags; /* Bit 0 set if encryption enabled */
228#define RAID_MAP_FLAG_ENCRYPT_ON 0x01
229 u16 dekindex; /* Data encryption key index. */
230 u8 reserved[16];
231 struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
232};
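
raid_map_data gives the driver everything needed to translate a logical block straight to a member disk handle without controller help: strip size, data disks per row, row count, and the per-position ioaccel_handle entries in data[]. A deliberately simplified, RAID-0-style translation, not the driver's full algorithm (the real mapper also handles mirrors, parity rotation, phys_blk_shift and the encryption fields; plain 64-bit division is used here for clarity, where a 32-bit kernel would need do_div()):

#include <linux/types.h>

/* demo_map mirrors just the raid_map_data fields used below */
struct demo_map {
	u16 strip_size;		/* blocks per strip on each disk */
	u16 data_disks_per_row;
	u16 row_cnt;		/* rows in each layout map */
};

/* Returns the index into the map's disk data array for block 'lba'. */
static u32 demo_lba_to_disk_index(const struct demo_map *m, u64 lba)
{
	u64 stripe = lba / m->strip_size;		/* strip number */
	u32 column = stripe % m->data_disks_per_row;	/* disk within row */
	u64 row = (stripe / m->data_disks_per_row) % m->row_cnt;

	return (u32)(row * m->data_disks_per_row + column);
}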
233
165struct ReportLUNdata { 234struct ReportLUNdata {
166 u8 LUNListLength[4]; 235 u8 LUNListLength[4];
167 u32 reserved; 236 u8 extended_response_flag;
237 u8 reserved[3];
168 u8 LUN[HPSA_MAX_LUN][8]; 238 u8 LUN[HPSA_MAX_LUN][8];
169}; 239};
170 240
@@ -187,6 +257,7 @@ struct SenseSubsystem_info {
187#define BMIC_CACHE_FLUSH 0xc2 257#define BMIC_CACHE_FLUSH 0xc2
188#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ 258#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
189#define BMIC_FLASH_FIRMWARE 0xF7 259#define BMIC_FLASH_FIRMWARE 0xF7
260#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
190 261
191/* Command List Structure */ 262/* Command List Structure */
192union SCSI3Addr { 263union SCSI3Addr {
@@ -283,6 +354,8 @@ struct ErrorInfo {
283/* Command types */ 354/* Command types */
284#define CMD_IOCTL_PEND 0x01 355#define CMD_IOCTL_PEND 0x01
285#define CMD_SCSI 0x03 356#define CMD_SCSI 0x03
357#define CMD_IOACCEL1 0x04
358#define CMD_IOACCEL2 0x05
286 359
287#define DIRECT_LOOKUP_SHIFT 5 360#define DIRECT_LOOKUP_SHIFT 5
288#define DIRECT_LOOKUP_BIT 0x10 361#define DIRECT_LOOKUP_BIT 0x10
@@ -314,7 +387,6 @@ struct CommandList {
314 int cmd_type; 387 int cmd_type;
315 long cmdindex; 388 long cmdindex;
316 struct list_head list; 389 struct list_head list;
317 struct request *rq;
318 struct completion *waiting; 390 struct completion *waiting;
319 void *scsi_cmd; 391 void *scsi_cmd;
320 392
@@ -327,16 +399,183 @@ struct CommandList {
327 */ 399 */
328#define IS_32_BIT ((8 - sizeof(long))/4) 400#define IS_32_BIT ((8 - sizeof(long))/4)
329#define IS_64_BIT (!IS_32_BIT) 401#define IS_64_BIT (!IS_32_BIT)
330#define PAD_32 (4) 402#define PAD_32 (40)
331#define PAD_64 (4) 403#define PAD_64 (12)
332#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) 404#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
333 u8 pad[COMMANDLIST_PAD]; 405 u8 pad[COMMANDLIST_PAD];
334}; 406};
335 407
408/* Max S/G elements in I/O accelerator command */
409#define IOACCEL1_MAXSGENTRIES 24
410#define IOACCEL2_MAXSGENTRIES 28
411
412/*
413 * Structure for I/O accelerator (mode 1) commands.
414 * Note that this structure must be 128-byte aligned in size.
415 */
416struct io_accel1_cmd {
417 u16 dev_handle; /* 0x00 - 0x01 */
418 u8 reserved1; /* 0x02 */
419 u8 function; /* 0x03 */
420 u8 reserved2[8]; /* 0x04 - 0x0B */
421 u32 err_info; /* 0x0C - 0x0F */
422 u8 reserved3[2]; /* 0x10 - 0x11 */
423 u8 err_info_len; /* 0x12 */
424 u8 reserved4; /* 0x13 */
425 u8 sgl_offset; /* 0x14 */
426 u8 reserved5[7]; /* 0x15 - 0x1B */
427 u32 transfer_len; /* 0x1C - 0x1F */
428 u8 reserved6[4]; /* 0x20 - 0x23 */
429 u16 io_flags; /* 0x24 - 0x25 */
430 u8 reserved7[14]; /* 0x26 - 0x33 */
431 u8 LUN[8]; /* 0x34 - 0x3B */
432 u32 control; /* 0x3C - 0x3F */
433 u8 CDB[16]; /* 0x40 - 0x4F */
434 u8 reserved8[16]; /* 0x50 - 0x5F */
435 u16 host_context_flags; /* 0x60 - 0x61 */
436 u16 timeout_sec; /* 0x62 - 0x63 */
437 u8 ReplyQueue; /* 0x64 */
438 u8 reserved9[3]; /* 0x65 - 0x67 */
439 struct vals32 Tag; /* 0x68 - 0x6F */
440 struct vals32 host_addr; /* 0x70 - 0x77 */
441 u8 CISS_LUN[8]; /* 0x78 - 0x7F */
442 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
443#define IOACCEL1_PAD_64 0
444#define IOACCEL1_PAD_32 0
445#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
446 IS_64_BIT * IOACCEL1_PAD_64)
447 u8 pad[IOACCEL1_PAD];
448};
449
450#define IOACCEL1_FUNCTION_SCSIIO 0x00
451#define IOACCEL1_SGLOFFSET 32
452
453#define IOACCEL1_IOFLAGS_IO_REQ 0x4000
454#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F
455#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16
456
457#define IOACCEL1_CONTROL_NODATAXFER 0x00000000
458#define IOACCEL1_CONTROL_DATA_OUT 0x01000000
459#define IOACCEL1_CONTROL_DATA_IN 0x02000000
460#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800
461#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
462#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000
463#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100
464#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200
465#define IOACCEL1_CONTROL_ACA 0x00000400
466
467#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013
468
469#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060
470
471struct ioaccel2_sg_element {
472 u64 address;
473 u32 length;
474 u8 reserved[3];
475 u8 chain_indicator;
476#define IOACCEL2_CHAIN 0x80
477};
478
479/*
480 * SCSI Response Format structure for IO Accelerator Mode 2
481 */
482struct io_accel2_scsi_response {
483 u8 IU_type;
484#define IOACCEL2_IU_TYPE_SRF 0x60
485 u8 reserved1[3];
486 u8 req_id[4]; /* request identifier */
487 u8 reserved2[4];
488 u8 serv_response; /* service response */
489#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000
490#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001
491#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002
492#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003
493#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004
494#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005
495 u8 status; /* status */
496#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00
497#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02
498#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08
499#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18
500#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28
501#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40
502#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E
503 u8 data_present; /* low 2 bits */
504#define IOACCEL2_NO_DATAPRESENT 0x000
505#define IOACCEL2_RESPONSE_DATAPRESENT 0x001
506#define IOACCEL2_SENSE_DATA_PRESENT 0x002
507#define IOACCEL2_RESERVED 0x003
508 u8 sense_data_len; /* sense/response data length */
509 u8 resid_cnt[4]; /* residual count */
510 u8 sense_data_buff[32]; /* sense/response data buffer */
511};
512
513#define IOACCEL2_64_PAD 76
514#define IOACCEL2_32_PAD 76
515#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
516 IS_64_BIT * IOACCEL2_64_PAD)
517/*
518 * Structure for I/O accelerator (mode 2 or m2) commands.
519 * Note that this structure must be 128-byte aligned in size.
520 */
521struct io_accel2_cmd {
522 u8 IU_type; /* IU Type */
523 u8 direction; /* direction, memtype, and encryption */
524#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */
525#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */
526 /* 0b=PCIe, 1b=DDR */
527#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */
528 /* 0=off, 1=on */
529 u8 reply_queue; /* Reply Queue ID */
530 u8 reserved1; /* Reserved */
531 u32 scsi_nexus; /* Device Handle */
532 u32 Tag; /* cciss tag, lower 4 bytes only */
533 u32 tweak_lower; /* Encryption tweak, lower 4 bytes */
534 u8 cdb[16]; /* SCSI Command Descriptor Block */
535 u8 cciss_lun[8]; /* 8 byte SCSI address */
536 u32 data_len; /* Total bytes to transfer */
537 u8 cmd_priority_task_attr; /* priority and task attrs */
538#define IOACCEL2_PRIORITY_MASK 0x78
539#define IOACCEL2_ATTR_MASK 0x07
540 u8 sg_count; /* Number of sg elements */
541 u16 dekindex; /* Data encryption key index */
542 u64 err_ptr; /* Error Pointer */
	543	u32 err_len;		/* Error Length */
544 u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
545 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
546 struct io_accel2_scsi_response error_data;
547 u8 pad[IOACCEL2_PAD];
548};
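
The direction byte of io_accel2_cmd packs three independent flags behind the masks defined above: transfer direction in bits 0-1, memory type in bit 2, encryption in bit 3. A sketch of composing and reading such a field (mask values copied from the defines; the helper names are hypothetical):

#include <linux/types.h>

#define DEMO_DIRECTION_MASK		0x03	/* bits 0-1: direction */
#define DEMO_DIRECTION_MEMTYPE_MASK	0x04	/* bit 2: 0=PCIe, 1=DDR */
#define DEMO_DIRECTION_ENCRYPT_MASK	0x08	/* bit 3: encryption */

static u8 demo_pack_direction(u8 dir, int ddr_memtype, int encrypted)
{
	u8 v = dir & DEMO_DIRECTION_MASK;

	if (ddr_memtype)
		v |= DEMO_DIRECTION_MEMTYPE_MASK;
	if (encrypted)
		v |= DEMO_DIRECTION_ENCRYPT_MASK;
	return v;
}

static int demo_is_encrypted(u8 direction)
{
	return (direction & DEMO_DIRECTION_ENCRYPT_MASK) != 0;
}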
549
550/*
551 * defines for Mode 2 command struct
552 * FIXME: this can't be all I need mfm
553 */
554#define IOACCEL2_IU_TYPE 0x40
555#define IOACCEL2_IU_TMF_TYPE 0x41
556#define IOACCEL2_DIR_NO_DATA 0x00
557#define IOACCEL2_DIR_DATA_IN 0x01
558#define IOACCEL2_DIR_DATA_OUT 0x02
559/*
560 * SCSI Task Management Request format for Accelerator Mode 2
561 */
562struct hpsa_tmf_struct {
563 u8 iu_type; /* Information Unit Type */
564 u8 reply_queue; /* Reply Queue ID */
565 u8 tmf; /* Task Management Function */
566 u8 reserved1; /* byte 3 Reserved */
567 u32 it_nexus; /* SCSI I-T Nexus */
568 u8 lun_id[8]; /* LUN ID for TMF request */
569 struct vals32 Tag; /* cciss tag associated w/ request */
570 struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
571 u64 error_ptr; /* Error Pointer */
572 u32 error_len; /* Error Length */
573};
574
336/* Configuration Table Structure */ 575/* Configuration Table Structure */
337struct HostWrite { 576struct HostWrite {
338 u32 TransportRequest; 577 u32 TransportRequest;
339 u32 Reserved; 578 u32 command_pool_addr_hi;
340 u32 CoalIntDelay; 579 u32 CoalIntDelay;
341 u32 CoalIntCount; 580 u32 CoalIntCount;
342}; 581};
@@ -344,6 +583,9 @@ struct HostWrite {
344#define SIMPLE_MODE 0x02 583#define SIMPLE_MODE 0x02
345#define PERFORMANT_MODE 0x04 584#define PERFORMANT_MODE 0x04
346#define MEMQ_MODE 0x08 585#define MEMQ_MODE 0x08
586#define IOACCEL_MODE_1 0x80
587
588#define DRIVER_SUPPORT_UA_ENABLE 0x00000001
347 589
348struct CfgTable { 590struct CfgTable {
349 u8 Signature[4]; 591 u8 Signature[4];
@@ -373,8 +615,18 @@ struct CfgTable {
373 u32 misc_fw_support; /* offset 0x78 */ 615 u32 misc_fw_support; /* offset 0x78 */
374#define MISC_FW_DOORBELL_RESET (0x02) 616#define MISC_FW_DOORBELL_RESET (0x02)
375#define MISC_FW_DOORBELL_RESET2 (0x010) 617#define MISC_FW_DOORBELL_RESET2 (0x010)
618#define MISC_FW_RAID_OFFLOAD_BASIC (0x020)
619#define MISC_FW_EVENT_NOTIFY (0x080)
376 u8 driver_version[32]; 620 u8 driver_version[32];
377 621 u32 max_cached_write_size;
622 u8 driver_scratchpad[16];
623 u32 max_error_info_length;
624 u32 io_accel_max_embedded_sg_count;
625 u32 io_accel_request_size_offset;
626 u32 event_notify;
627#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
628#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
629 u32 clear_event_notify;
378}; 630};
379 631
380#define NUM_BLOCKFETCH_ENTRIES 8 632#define NUM_BLOCKFETCH_ENTRIES 8
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bf9eca845166..56f8a861ed72 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -589,7 +589,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
589 } 589 }
590 590
591 err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt, 591 err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
592 IRQF_DISABLED, "ibmvstgt", target); 592 0, "ibmvstgt", target);
593 if (err) 593 if (err)
594 goto req_irq_failed; 594 goto req_irq_failed;
595 595
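
This hunk, like the in2000 and initio ones below, drops IRQF_DISABLED: the flag has been a no-op for years, since genirq runs all handlers with local interrupts disabled anyway, so the conversion is purely mechanical, passing 0 or keeping only flags that still mean something, such as IRQF_SHARED. A sketch of the resulting usage (the handler and names are hypothetical):

#include <linux/interrupt.h>

static irqreturn_t demo_intr(int irq, void *dev_id)
{
	/* acknowledge the device here, then: */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev)
{
	/* 0 where IRQF_DISABLED used to be; add IRQF_SHARED only if
	 * the interrupt line may really be shared. */
	return request_irq(irq, demo_intr, 0, "demo", dev);
}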
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index bf028218ac36..b1c4d831137d 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt)
2015 write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ 2015 write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */
2016 write1_io(0, IO_INTR_MASK); /* allow all ints */ 2016 write1_io(0, IO_INTR_MASK); /* allow all ints */
2017 x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; 2017 x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
2018 if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) { 2018 if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
2019 printk("in2000_detect: Unable to allocate IRQ.\n"); 2019 printk("in2000_detect: Unable to allocate IRQ.\n");
2020 detect_count--; 2020 detect_count--;
2021 continue; 2021 continue;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 280d5af113d1..e5dae7b54d9a 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2931,7 +2931,7 @@ static int initio_probe_one(struct pci_dev *pdev,
2931 shost->base = host->addr; 2931 shost->base = host->addr;
2932 shost->sg_tablesize = TOTAL_SG_ENTRY; 2932 shost->sg_tablesize = TOTAL_SG_ENTRY;
2933 2933
2934 error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost); 2934 error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
2935 if (error < 0) { 2935 if (error < 0) {
2936 printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq); 2936 printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
2937 goto out_free_scbs; 2937 goto out_free_scbs;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a99892..2f8dd8e4225b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1143,6 +1143,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1143 res->add_to_ml = 0; 1143 res->add_to_ml = 0;
1144 res->del_from_ml = 0; 1144 res->del_from_ml = 0;
1145 res->resetting_device = 0; 1145 res->resetting_device = 0;
1146 res->reset_occurred = 0;
1146 res->sdev = NULL; 1147 res->sdev = NULL;
1147 res->sata_port = NULL; 1148 res->sata_port = NULL;
1148 1149
@@ -2367,6 +2368,42 @@ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2367} 2368}
2368 2369
2369/** 2370/**
 2371 * ipr_log_sis64_device_error - Log a sis64 device error.
2372 * @ioa_cfg: ioa config struct
2373 * @hostrcb: hostrcb struct
2374 *
2375 * Return value:
2376 * none
2377 **/
2378static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379 struct ipr_hostrcb *hostrcb)
2380{
2381 struct ipr_hostrcb_type_21_error *error;
2382 char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384 error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386 ipr_err("-----Failing Device Information-----\n");
2387 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390 ipr_err("Device Resource Path: %s\n",
2391 __ipr_format_res_path(error->res_path,
2392 buffer, sizeof(buffer)));
2393 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2397 ipr_err("SCSI Sense Data:\n");
2398 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
 2399	ipr_err("SCSI Command Descriptor Block:\n");
2400 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402 ipr_err("Additional IOA Data:\n");
2403 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404}
2405
2406/**
 2370 * ipr_get_error - Find the specified IOASC in the ipr_error_table.	2407 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2371 * @ioasc: IOASC 2408 * @ioasc: IOASC
2372 * 2409 *
@@ -2467,6 +2504,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2467 case IPR_HOST_RCB_OVERLAY_ID_20: 2504 case IPR_HOST_RCB_OVERLAY_ID_20:
2468 ipr_log_fabric_error(ioa_cfg, hostrcb); 2505 ipr_log_fabric_error(ioa_cfg, hostrcb);
2469 break; 2506 break;
2507 case IPR_HOST_RCB_OVERLAY_ID_21:
2508 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2509 break;
2470 case IPR_HOST_RCB_OVERLAY_ID_23: 2510 case IPR_HOST_RCB_OVERLAY_ID_23:
2471 ipr_log_sis64_config_error(ioa_cfg, hostrcb); 2511 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2472 break; 2512 break;
@@ -5015,6 +5055,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5015 } else 5055 } else
5016 rc = ipr_device_reset(ioa_cfg, res); 5056 rc = ipr_device_reset(ioa_cfg, res);
5017 res->resetting_device = 0; 5057 res->resetting_device = 0;
5058 res->reset_occurred = 1;
5018 5059
5019 LEAVE; 5060 LEAVE;
5020 return rc ? FAILED : SUCCESS; 5061 return rc ? FAILED : SUCCESS;
@@ -6183,8 +6224,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
6183 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6224 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6184 6225
6185 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 6226 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6186 if (ipr_is_gscsi(res)) 6227 if (ipr_is_gscsi(res) && res->reset_occurred) {
6228 res->reset_occurred = 0;
6187 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 6229 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6230 }
6188 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; 6231 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6189 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); 6232 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6190 } 6233 }
@@ -8641,6 +8684,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8641} 8684}
8642 8685
8643/** 8686/**
8687 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8688 * @pdev: PCI device struct
8689 *
8690 * Description: This routine is called to tell us that the MMIO
8691 * access to the IOA has been restored
8692 */
8693static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8694{
8695 unsigned long flags = 0;
8696 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8697
8698 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8699 if (!ioa_cfg->probe_done)
8700 pci_save_state(pdev);
8701 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8702 return PCI_ERS_RESULT_NEED_RESET;
8703}
8704
8705/**
8644 * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 8706 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8645 * @pdev: PCI device struct 8707 * @pdev: PCI device struct
8646 * 8708 *
@@ -8654,7 +8716,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev)
8654 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8716 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8655 8717
8656 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8718 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8657 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); 8719 if (ioa_cfg->probe_done)
8720 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8659} 8722}
8660 8723
@@ -8672,11 +8735,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8672 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 8735 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8673 8736
8674 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8737 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8675 if (ioa_cfg->needs_warm_reset) 8738 if (ioa_cfg->probe_done) {
8676 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8739 if (ioa_cfg->needs_warm_reset)
8677 else 8740 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8678 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, 8741 else
8679 IPR_SHUTDOWN_NONE); 8742 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8743 IPR_SHUTDOWN_NONE);
8744 } else
8745 wake_up_all(&ioa_cfg->eeh_wait_q);
8680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8746 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8681 return PCI_ERS_RESULT_RECOVERED; 8747 return PCI_ERS_RESULT_RECOVERED;
8682} 8748}
@@ -8695,17 +8761,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
8695 int i; 8761 int i;
8696 8762
8697 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8763 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8698 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 8764 if (ioa_cfg->probe_done) {
8699 ioa_cfg->sdt_state = ABORT_DUMP; 8765 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8700 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; 8766 ioa_cfg->sdt_state = ABORT_DUMP;
8701 ioa_cfg->in_ioa_bringdown = 1; 8767 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8702 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8768 ioa_cfg->in_ioa_bringdown = 1;
8703 spin_lock(&ioa_cfg->hrrq[i]._lock); 8769 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8704 ioa_cfg->hrrq[i].allow_cmds = 0; 8770 spin_lock(&ioa_cfg->hrrq[i]._lock);
8705 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8771 ioa_cfg->hrrq[i].allow_cmds = 0;
8706 } 8772 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8707 wmb(); 8773 }
8708 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8774 wmb();
8775 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8776 } else
8777 wake_up_all(&ioa_cfg->eeh_wait_q);
8709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 8778 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8710} 8779}
8711 8780
@@ -8725,7 +8794,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8725 switch (state) { 8794 switch (state) {
8726 case pci_channel_io_frozen: 8795 case pci_channel_io_frozen:
8727 ipr_pci_frozen(pdev); 8796 ipr_pci_frozen(pdev);
8728 return PCI_ERS_RESULT_NEED_RESET; 8797 return PCI_ERS_RESULT_CAN_RECOVER;
8729 case pci_channel_io_perm_failure: 8798 case pci_channel_io_perm_failure:
8730 ipr_pci_perm_failure(pdev); 8799 ipr_pci_perm_failure(pdev);
8731 return PCI_ERS_RESULT_DISCONNECT; 8800 return PCI_ERS_RESULT_DISCONNECT;
@@ -8755,6 +8824,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8755 ENTER; 8824 ENTER;
8756 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 8825 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8757 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); 8826 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8827 ioa_cfg->probe_done = 1;
8758 if (ioa_cfg->needs_hard_reset) { 8828 if (ioa_cfg->needs_hard_reset) {
8759 ioa_cfg->needs_hard_reset = 0; 8829 ioa_cfg->needs_hard_reset = 0;
8760 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 8830 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9030,16 +9100,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9030 if (!ioa_cfg->vpd_cbs) 9100 if (!ioa_cfg->vpd_cbs)
9031 goto out_free_res_entries; 9101 goto out_free_res_entries;
9032 9102
9033 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9034 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9035 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9036 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9037 if (i == 0)
9038 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9039 else
9040 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9041 }
9042
9043 if (ipr_alloc_cmd_blks(ioa_cfg)) 9103 if (ipr_alloc_cmd_blks(ioa_cfg))
9044 goto out_free_vpd_cbs; 9104 goto out_free_vpd_cbs;
9045 9105
@@ -9140,6 +9200,48 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9140} 9200}
9141 9201
9142/** 9202/**
9203 * ipr_init_regs - Initialize IOA registers
9204 * @ioa_cfg: ioa config struct
9205 *
9206 * Return value:
9207 * none
9208 **/
9209static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9210{
9211 const struct ipr_interrupt_offsets *p;
9212 struct ipr_interrupts *t;
9213 void __iomem *base;
9214
9215 p = &ioa_cfg->chip_cfg->regs;
9216 t = &ioa_cfg->regs;
9217 base = ioa_cfg->hdw_dma_regs;
9218
9219 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9220 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9221 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9222 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9223 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9224 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9225 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9226 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9227 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9228 t->ioarrin_reg = base + p->ioarrin_reg;
9229 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9230 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9231 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9232 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9233 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9234 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9235
9236 if (ioa_cfg->sis64) {
9237 t->init_feedback_reg = base + p->init_feedback_reg;
9238 t->dump_addr_reg = base + p->dump_addr_reg;
9239 t->dump_data_reg = base + p->dump_data_reg;
9240 t->endian_swap_reg = base + p->endian_swap_reg;
9241 }
9242}
9243
9244/**
9143 * ipr_init_ioa_cfg - Initialize IOA config struct 9245 * ipr_init_ioa_cfg - Initialize IOA config struct
9144 * @ioa_cfg: ioa config struct 9246 * @ioa_cfg: ioa config struct
9145 * @host: scsi host struct 9247 * @host: scsi host struct
@@ -9151,9 +9253,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9151static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, 9253static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9152 struct Scsi_Host *host, struct pci_dev *pdev) 9254 struct Scsi_Host *host, struct pci_dev *pdev)
9153{ 9255{
9154 const struct ipr_interrupt_offsets *p; 9256 int i;
9155 struct ipr_interrupts *t;
9156 void __iomem *base;
9157 9257
9158 ioa_cfg->host = host; 9258 ioa_cfg->host = host;
9159 ioa_cfg->pdev = pdev; 9259 ioa_cfg->pdev = pdev;
@@ -9173,6 +9273,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9173 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9273 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9174 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9274 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9175 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9275 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9276 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9176 ioa_cfg->sdt_state = INACTIVE; 9277 ioa_cfg->sdt_state = INACTIVE;
9177 9278
9178 ipr_initialize_bus_attr(ioa_cfg); 9279 ipr_initialize_bus_attr(ioa_cfg);
@@ -9183,44 +9284,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9183 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; 9284 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9184 if (ipr_max_devs > IPR_MAX_SIS64_DEVS) 9285 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9185 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; 9286 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9287 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9288 + ((sizeof(struct ipr_config_table_entry64)
9289 * ioa_cfg->max_devs_supported)));
9186 } else { 9290 } else {
9187 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 9291 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9188 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 9292 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9189 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) 9293 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9190 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; 9294 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9295 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9296 + ((sizeof(struct ipr_config_table_entry)
9297 * ioa_cfg->max_devs_supported)));
9191 } 9298 }
9299
9192 host->max_channel = IPR_MAX_BUS_TO_SCAN; 9300 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9193 host->unique_id = host->host_no; 9301 host->unique_id = host->host_no;
9194 host->max_cmd_len = IPR_MAX_CDB_LEN; 9302 host->max_cmd_len = IPR_MAX_CDB_LEN;
9195 host->can_queue = ioa_cfg->max_cmds; 9303 host->can_queue = ioa_cfg->max_cmds;
9196 pci_set_drvdata(pdev, ioa_cfg); 9304 pci_set_drvdata(pdev, ioa_cfg);
9197 9305
9198 p = &ioa_cfg->chip_cfg->regs; 9306 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9199 t = &ioa_cfg->regs; 9307 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9200 base = ioa_cfg->hdw_dma_regs; 9308 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9201 9309 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9202 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 9310 if (i == 0)
9203 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 9311 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9204 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; 9312 else
9205 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 9313 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9206 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9207 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9208 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9209 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9210 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9211 t->ioarrin_reg = base + p->ioarrin_reg;
9212 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9213 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9214 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9215 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9216 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9217 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9218
9219 if (ioa_cfg->sis64) {
9220 t->init_feedback_reg = base + p->init_feedback_reg;
9221 t->dump_addr_reg = base + p->dump_addr_reg;
9222 t->dump_data_reg = base + p->dump_data_reg;
9223 t->endian_swap_reg = base + p->endian_swap_reg;
9224 } 9314 }
9225} 9315}
9226 9316
@@ -9243,54 +9333,63 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
9243 return NULL; 9333 return NULL;
9244} 9334}
9245 9335
9336/**
9337 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9338 * during probe time
9339 * @ioa_cfg: ioa config struct
9340 *
9341 * Return value:
9342 * None
9343 **/
9344static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9345{
9346 struct pci_dev *pdev = ioa_cfg->pdev;
9347
9348 if (pci_channel_offline(pdev)) {
9349 wait_event_timeout(ioa_cfg->eeh_wait_q,
9350 !pci_channel_offline(pdev),
9351 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9352 pci_restore_state(pdev);
9353 }
9354}
9355
9246static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) 9356static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9247{ 9357{
9248 struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; 9358 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9249 int i, err, vectors; 9359 int i, vectors;
9250 9360
9251 for (i = 0; i < ARRAY_SIZE(entries); ++i) 9361 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9252 entries[i].entry = i; 9362 entries[i].entry = i;
9253 9363
9254 vectors = ipr_number_of_msix; 9364 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9255 9365 entries, 1, ipr_number_of_msix);
9256 while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0) 9366 if (vectors < 0) {
9257 vectors = err; 9367 ipr_wait_for_pci_err_recovery(ioa_cfg);
9258 9368 return vectors;
9259 if (err < 0) {
9260 pci_disable_msix(ioa_cfg->pdev);
9261 return err;
9262 } 9369 }
9263 9370
9264 if (!err) { 9371 for (i = 0; i < vectors; i++)
9265 for (i = 0; i < vectors; i++) 9372 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9266 ioa_cfg->vectors_info[i].vec = entries[i].vector; 9373 ioa_cfg->nvectors = vectors;
9267 ioa_cfg->nvectors = vectors;
9268 }
9269 9374
9270 return err; 9375 return 0;
9271} 9376}
9272 9377
9273static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) 9378static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9274{ 9379{
9275 int i, err, vectors; 9380 int i, vectors;
9276
9277 vectors = ipr_number_of_msix;
9278 9381
9279 while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0) 9382 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9280 vectors = err; 9383 if (vectors < 0) {
9281 9384 ipr_wait_for_pci_err_recovery(ioa_cfg);
9282 if (err < 0) { 9385 return vectors;
9283 pci_disable_msi(ioa_cfg->pdev);
9284 return err;
9285 } 9386 }
9286 9387
9287 if (!err) { 9388 for (i = 0; i < vectors; i++)
9288 for (i = 0; i < vectors; i++) 9389 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9289 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; 9390 ioa_cfg->nvectors = vectors;
9290 ioa_cfg->nvectors = vectors;
9291 }
9292 9391
9293 return err; 9392 return 0;
9294} 9393}
9295 9394
9296static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) 9395static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
@@ -9355,7 +9454,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
9355 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. 9454 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9356 * @pdev: PCI device struct 9455 * @pdev: PCI device struct
9357 * 9456 *
 9358 * Description: The return value from pci_enable_msi() cannot always be	9457 * Description: The return value from pci_enable_msi_range() cannot always be
9359 * trusted. This routine sets up and initiates a test interrupt to determine 9458 * trusted. This routine sets up and initiates a test interrupt to determine
9360 * if the interrupt is received via the ipr_test_intr() service routine. 9459 * if the interrupt is received via the ipr_test_intr() service routine.
 9361 * If the test fails, the driver will fall back to LSI.	9460 * If the test fails, the driver will fall back to LSI.
@@ -9434,19 +9533,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9434 9533
9435 ENTER; 9534 ENTER;
9436 9535
9437 if ((rc = pci_enable_device(pdev))) {
9438 dev_err(&pdev->dev, "Cannot enable adapter\n");
9439 goto out;
9440 }
9441
9442 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); 9536 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9443
9444 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); 9537 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9445 9538
9446 if (!host) { 9539 if (!host) {
9447 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); 9540 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9448 rc = -ENOMEM; 9541 rc = -ENOMEM;
9449 goto out_disable; 9542 goto out;
9450 } 9543 }
9451 9544
9452 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 9545 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9476,6 +9569,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9476 9569
9477 ioa_cfg->revid = pdev->revision; 9570 ioa_cfg->revid = pdev->revision;
9478 9571
9572 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9573
9479 ipr_regs_pci = pci_resource_start(pdev, 0); 9574 ipr_regs_pci = pci_resource_start(pdev, 0);
9480 9575
9481 rc = pci_request_regions(pdev, IPR_NAME); 9576 rc = pci_request_regions(pdev, IPR_NAME);
@@ -9485,22 +9580,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9485 goto out_scsi_host_put; 9580 goto out_scsi_host_put;
9486 } 9581 }
9487 9582
9583 rc = pci_enable_device(pdev);
9584
9585 if (rc || pci_channel_offline(pdev)) {
9586 if (pci_channel_offline(pdev)) {
9587 ipr_wait_for_pci_err_recovery(ioa_cfg);
9588 rc = pci_enable_device(pdev);
9589 }
9590
9591 if (rc) {
9592 dev_err(&pdev->dev, "Cannot enable adapter\n");
9593 ipr_wait_for_pci_err_recovery(ioa_cfg);
9594 goto out_release_regions;
9595 }
9596 }
9597
9488 ipr_regs = pci_ioremap_bar(pdev, 0); 9598 ipr_regs = pci_ioremap_bar(pdev, 0);
9489 9599
9490 if (!ipr_regs) { 9600 if (!ipr_regs) {
9491 dev_err(&pdev->dev, 9601 dev_err(&pdev->dev,
9492 "Couldn't map memory range of registers\n"); 9602 "Couldn't map memory range of registers\n");
9493 rc = -ENOMEM; 9603 rc = -ENOMEM;
9494 goto out_release_regions; 9604 goto out_disable;
9495 } 9605 }
9496 9606
9497 ioa_cfg->hdw_dma_regs = ipr_regs; 9607 ioa_cfg->hdw_dma_regs = ipr_regs;
9498 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; 9608 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9499 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; 9609 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9500 9610
9501 ipr_init_ioa_cfg(ioa_cfg, host, pdev); 9611 ipr_init_regs(ioa_cfg);
9502
9503 pci_set_master(pdev);
9504 9612
9505 if (ioa_cfg->sis64) { 9613 if (ioa_cfg->sis64) {
9506 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 9614 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9508,7 +9616,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9508 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); 9616 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9509 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9617 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9510 } 9618 }
9511
9512 } else 9619 } else
9513 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9620 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9514 9621
@@ -9522,10 +9629,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9522 9629
9523 if (rc != PCIBIOS_SUCCESSFUL) { 9630 if (rc != PCIBIOS_SUCCESSFUL) {
9524 dev_err(&pdev->dev, "Write of cache line size failed\n"); 9631 dev_err(&pdev->dev, "Write of cache line size failed\n");
9632 ipr_wait_for_pci_err_recovery(ioa_cfg);
9525 rc = -EIO; 9633 rc = -EIO;
9526 goto cleanup_nomem; 9634 goto cleanup_nomem;
9527 } 9635 }
9528 9636
9637 /* Issue MMIO read to ensure card is not in EEH */
9638 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9639 ipr_wait_for_pci_err_recovery(ioa_cfg);
9640
9529 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { 9641 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9530 dev_err(&pdev->dev, "The max number of MSIX is %d\n", 9642 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9531 IPR_MAX_MSIX_VECTORS); 9643 IPR_MAX_MSIX_VECTORS);
@@ -9544,10 +9656,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9544 dev_info(&pdev->dev, "Cannot enable MSI.\n"); 9656 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9545 } 9657 }
9546 9658
9659 pci_set_master(pdev);
9660
9661 if (pci_channel_offline(pdev)) {
9662 ipr_wait_for_pci_err_recovery(ioa_cfg);
9663 pci_set_master(pdev);
9664 if (pci_channel_offline(pdev)) {
9665 rc = -EIO;
9666 goto out_msi_disable;
9667 }
9668 }
9669
9547 if (ioa_cfg->intr_flag == IPR_USE_MSI || 9670 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9548 ioa_cfg->intr_flag == IPR_USE_MSIX) { 9671 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9549 rc = ipr_test_msi(ioa_cfg, pdev); 9672 rc = ipr_test_msi(ioa_cfg, pdev);
9550 if (rc == -EOPNOTSUPP) { 9673 if (rc == -EOPNOTSUPP) {
9674 ipr_wait_for_pci_err_recovery(ioa_cfg);
9551 if (ioa_cfg->intr_flag == IPR_USE_MSI) { 9675 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9552 ioa_cfg->intr_flag &= ~IPR_USE_MSI; 9676 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9553 pci_disable_msi(pdev); 9677 pci_disable_msi(pdev);
@@ -9577,30 +9701,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9577 (unsigned int)num_online_cpus(), 9701 (unsigned int)num_online_cpus(),
9578 (unsigned int)IPR_MAX_HRRQ_NUM); 9702 (unsigned int)IPR_MAX_HRRQ_NUM);
9579 9703
9580 /* Save away PCI config space for use following IOA reset */
9581 rc = pci_save_state(pdev);
9582
9583 if (rc != PCIBIOS_SUCCESSFUL) {
9584 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9585 rc = -EIO;
9586 goto out_msi_disable;
9587 }
9588
9589 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 9704 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9590 goto out_msi_disable; 9705 goto out_msi_disable;
9591 9706
9592 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 9707 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9593 goto out_msi_disable; 9708 goto out_msi_disable;
9594 9709
9595 if (ioa_cfg->sis64)
9596 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9597 + ((sizeof(struct ipr_config_table_entry64)
9598 * ioa_cfg->max_devs_supported)));
9599 else
9600 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9601 + ((sizeof(struct ipr_config_table_entry)
9602 * ioa_cfg->max_devs_supported)));
9603
9604 rc = ipr_alloc_mem(ioa_cfg); 9710 rc = ipr_alloc_mem(ioa_cfg);
9605 if (rc < 0) { 9711 if (rc < 0) {
9606 dev_err(&pdev->dev, 9712 dev_err(&pdev->dev,
@@ -9608,6 +9714,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9608 goto out_msi_disable; 9714 goto out_msi_disable;
9609 } 9715 }
9610 9716
9717 /* Save away PCI config space for use following IOA reset */
9718 rc = pci_save_state(pdev);
9719
9720 if (rc != PCIBIOS_SUCCESSFUL) {
9721 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9722 rc = -EIO;
9723 goto cleanup_nolog;
9724 }
9725
9611 /* 9726 /*
9612 * If HRRQ updated interrupt is not masked, or reset alert is set, 9727 * If HRRQ updated interrupt is not masked, or reset alert is set,
9613 * the card is in an unknown state and needs a hard reset 9728 * the card is in an unknown state and needs a hard reset
@@ -9664,18 +9779,19 @@ out:
9664cleanup_nolog: 9779cleanup_nolog:
9665 ipr_free_mem(ioa_cfg); 9780 ipr_free_mem(ioa_cfg);
9666out_msi_disable: 9781out_msi_disable:
9782 ipr_wait_for_pci_err_recovery(ioa_cfg);
9667 if (ioa_cfg->intr_flag == IPR_USE_MSI) 9783 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9668 pci_disable_msi(pdev); 9784 pci_disable_msi(pdev);
9669 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) 9785 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9670 pci_disable_msix(pdev); 9786 pci_disable_msix(pdev);
9671cleanup_nomem: 9787cleanup_nomem:
9672 iounmap(ipr_regs); 9788 iounmap(ipr_regs);
9789out_disable:
9790 pci_disable_device(pdev);
9673out_release_regions: 9791out_release_regions:
9674 pci_release_regions(pdev); 9792 pci_release_regions(pdev);
9675out_scsi_host_put: 9793out_scsi_host_put:
9676 scsi_host_put(host); 9794 scsi_host_put(host);
9677out_disable:
9678 pci_disable_device(pdev);
9679 goto out; 9795 goto out;
9680} 9796}
9681 9797
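
The relocated unwind labels are easy to misread in hunk form: with pci_enable_device() now called after pci_request_regions(), the teardown ladder has to run in exact reverse order of acquisition, so the out_disable label (device disable) moves above out_release_regions instead of trailing scsi_host_put(). A minimal sketch of the idiom with hypothetical names, not the ipr code itself:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		return rc;

	rc = pci_enable_device(pdev);
	if (rc)
		goto out_release_regions;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		rc = -ENOMEM;
		goto out_disable;
	}

	/* ... regs would be stashed and init would continue ... */
	return 0;

out_disable:
	pci_disable_device(pdev);	/* reverse of enable ...  */
out_release_regions:
	pci_release_regions(pdev);	/* ... then of request    */
	return rc;
}
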
@@ -9994,6 +10110,8 @@ static struct pci_device_id ipr_pci_table[] = {
9994 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10110 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9995 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, 10111 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
9996 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10112 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10113 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10114 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9997 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, 10115 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
9998 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10116 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9999 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, 10117 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
@@ -10005,12 +10123,19 @@ static struct pci_device_id ipr_pci_table[] = {
10005 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, 10123 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10006 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 10124 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10007 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, 10125 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10126 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10127 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10128 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10129 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10130 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10131 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10008 { } 10132 { }
10009}; 10133};
10010MODULE_DEVICE_TABLE(pci, ipr_pci_table); 10134MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10011 10135
10012static const struct pci_error_handlers ipr_err_handler = { 10136static const struct pci_error_handlers ipr_err_handler = {
10013 .error_detected = ipr_pci_error_detected, 10137 .error_detected = ipr_pci_error_detected,
10138 .mmio_enabled = ipr_pci_mmio_enabled,
10014 .slot_reset = ipr_pci_slot_reset, 10139 .slot_reset = ipr_pci_slot_reset,
10015}; 10140};
10016 10141
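
Wiring up .mmio_enabled is what makes the probe-side EEH handling above useful: the PCI core only calls it when ->error_detected returned PCI_ERS_RESULT_CAN_RECOVER, giving the driver one look at its registers before a slot reset is considered. A sketch of the callback shape with stub bodies; the real ipr handlers do considerably more:

#include <linux/pci.h>

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	/* ask for the recover window so mmio_enabled gets called */
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t example_mmio_enabled(struct pci_dev *pdev)
{
	/* MMIO is readable again; decide whether a reset is needed */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	.mmio_enabled	= example_mmio_enabled,
	.slot_reset	= example_slot_reset,
};
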
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ce38a22647e..31ed126f7143 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -101,12 +101,16 @@
101#define IPR_SUBS_DEV_ID_57D7 0x03FF 101#define IPR_SUBS_DEV_ID_57D7 0x03FF
102#define IPR_SUBS_DEV_ID_57D8 0x03FE 102#define IPR_SUBS_DEV_ID_57D8 0x03FE
103#define IPR_SUBS_DEV_ID_57D9 0x046D 103#define IPR_SUBS_DEV_ID_57D9 0x046D
104#define IPR_SUBS_DEV_ID_57DA 0x04CA
104#define IPR_SUBS_DEV_ID_57EB 0x0474 105#define IPR_SUBS_DEV_ID_57EB 0x0474
105#define IPR_SUBS_DEV_ID_57EC 0x0475 106#define IPR_SUBS_DEV_ID_57EC 0x0475
106#define IPR_SUBS_DEV_ID_57ED 0x0499 107#define IPR_SUBS_DEV_ID_57ED 0x0499
107#define IPR_SUBS_DEV_ID_57EE 0x049A 108#define IPR_SUBS_DEV_ID_57EE 0x049A
108#define IPR_SUBS_DEV_ID_57EF 0x049B 109#define IPR_SUBS_DEV_ID_57EF 0x049B
109#define IPR_SUBS_DEV_ID_57F0 0x049C 110#define IPR_SUBS_DEV_ID_57F0 0x049C
111#define IPR_SUBS_DEV_ID_2CCA 0x04C7
112#define IPR_SUBS_DEV_ID_2CD2 0x04C8
113#define IPR_SUBS_DEV_ID_2CCD 0x04C9
110#define IPR_NAME "ipr" 114#define IPR_NAME "ipr"
111 115
112/* 116/*
@@ -230,6 +234,7 @@
230#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) 234#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
231#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 235#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
232#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 236#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
237#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
233#define IPR_PCI_RESET_TIMEOUT (HZ / 2) 238#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
234#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ) 239#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
235#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ) 240#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
@@ -897,6 +902,18 @@ struct ipr_hostrcb_type_01_error {
897 __be32 ioa_data[236]; 902 __be32 ioa_data[236];
898}__attribute__((packed, aligned (4))); 903}__attribute__((packed, aligned (4)));
899 904
905struct ipr_hostrcb_type_21_error {
906 __be32 wwn[4];
907 u8 res_path[8];
908 u8 primary_problem_desc[32];
909 u8 second_problem_desc[32];
910 __be32 sense_data[8];
911 __be32 cdb[4];
912 __be32 residual_trans_length;
913 __be32 length_of_error;
914 __be32 ioa_data[236];
915}__attribute__((packed, aligned (4)));
916
900struct ipr_hostrcb_type_02_error { 917struct ipr_hostrcb_type_02_error {
901 struct ipr_vpd ioa_vpd; 918 struct ipr_vpd ioa_vpd;
902 struct ipr_vpd cfc_vpd; 919 struct ipr_vpd cfc_vpd;
@@ -1126,6 +1143,7 @@ struct ipr_hostrcb64_error {
1126 struct ipr_hostrcb_type_ff_error type_ff_error; 1143 struct ipr_hostrcb_type_ff_error type_ff_error;
1127 struct ipr_hostrcb_type_12_error type_12_error; 1144 struct ipr_hostrcb_type_12_error type_12_error;
1128 struct ipr_hostrcb_type_17_error type_17_error; 1145 struct ipr_hostrcb_type_17_error type_17_error;
1146 struct ipr_hostrcb_type_21_error type_21_error;
1129 struct ipr_hostrcb_type_23_error type_23_error; 1147 struct ipr_hostrcb_type_23_error type_23_error;
1130 struct ipr_hostrcb_type_24_error type_24_error; 1148 struct ipr_hostrcb_type_24_error type_24_error;
1131 struct ipr_hostrcb_type_30_error type_30_error; 1149 struct ipr_hostrcb_type_30_error type_30_error;
@@ -1169,6 +1187,7 @@ struct ipr_hcam {
1169#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 1187#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
1170#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 1188#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
1171#define IPR_HOST_RCB_OVERLAY_ID_20 0x20 1189#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
1190#define IPR_HOST_RCB_OVERLAY_ID_21 0x21
1172#define IPR_HOST_RCB_OVERLAY_ID_23 0x23 1191#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
1173#define IPR_HOST_RCB_OVERLAY_ID_24 0x24 1192#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
1174#define IPR_HOST_RCB_OVERLAY_ID_26 0x26 1193#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
@@ -1252,6 +1271,7 @@ struct ipr_resource_entry {
1252 u8 add_to_ml:1; 1271 u8 add_to_ml:1;
1253 u8 del_from_ml:1; 1272 u8 del_from_ml:1;
1254 u8 resetting_device:1; 1273 u8 resetting_device:1;
1274 u8 reset_occurred:1;
1255 1275
1256 u32 bus; /* AKA channel */ 1276 u32 bus; /* AKA channel */
1257 u32 target; /* AKA id */ 1277 u32 target; /* AKA id */
@@ -1441,6 +1461,7 @@ struct ipr_ioa_cfg {
1441 u8 dump_timeout:1; 1461 u8 dump_timeout:1;
1442 u8 cfg_locked:1; 1462 u8 cfg_locked:1;
1443 u8 clear_isr:1; 1463 u8 clear_isr:1;
1464 u8 probe_done:1;
1444 1465
1445 u8 revid; 1466 u8 revid;
1446 1467
@@ -1519,6 +1540,7 @@ struct ipr_ioa_cfg {
1519 1540
1520 wait_queue_head_t reset_wait_q; 1541 wait_queue_head_t reset_wait_q;
1521 wait_queue_head_t msi_wait_q; 1542 wait_queue_head_t msi_wait_q;
1543 wait_queue_head_t eeh_wait_q;
1522 1544
1523 struct ipr_dump *dump; 1545 struct ipr_dump *dump;
1524 enum ipr_sdt_state sdt_state; 1546 enum ipr_sdt_state sdt_state;
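
The new eeh_wait_q and IPR_PCI_ERROR_RECOVERY_TIMEOUT fields are the two pieces the probe path's ipr_wait_for_pci_err_recovery() calls are built on: whenever the channel is offline, sleep until the error-recovery callbacks bring it back or the 120-second budget expires. A hedged sketch of the waiting side only, with abbreviated names (the wake-up happens in the EEH handlers):

#include <linux/pci.h>
#include <linux/wait.h>

struct example_cfg {			/* stand-in for ipr_ioa_cfg */
	struct pci_dev *pdev;
	wait_queue_head_t eeh_wait_q;
};

static void example_wait_for_pci_err_recovery(struct example_cfg *cfg)
{
	if (pci_channel_offline(cfg->pdev))
		wait_event_timeout(cfg->eeh_wait_q,
				   !pci_channel_offline(cfg->pdev),
				   120 * HZ);	/* recovery timeout */
}
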
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d25d0d859f05..695b34e9154e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -66,7 +66,7 @@
66#include "probe_roms.h" 66#include "probe_roms.h"
67 67
68#define MAJ 1 68#define MAJ 1
69#define MIN 1 69#define MIN 2
70#define BUILD 0 70#define BUILD 0
71#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 71#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
72 __stringify(BUILD) 72 __stringify(BUILD)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 99d2930b18c8..56e38096f0c4 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2723,13 +2723,9 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
2723 memcpy(resp->ending_fis, fis, sizeof(*fis)); 2723 memcpy(resp->ending_fis, fis, sizeof(*fis));
2724 ts->buf_valid_size = sizeof(*resp); 2724 ts->buf_valid_size = sizeof(*resp);
2725 2725
2726 /* If the device fault bit is set in the status register, then 2726 /* If an error is flagged let libata decode the fis */
2727 * set the sense data and return. 2727 if (ac_err_mask(fis->status))
2728 */
2729 if (fis->status & ATA_DF)
2730 ts->stat = SAS_PROTO_RESPONSE; 2728 ts->stat = SAS_PROTO_RESPONSE;
2731 else if (fis->status & ATA_ERR)
2732 ts->stat = SAM_STAT_CHECK_CONDITION;
2733 else 2729 else
2734 ts->stat = SAM_STAT_GOOD; 2730 ts->stat = SAM_STAT_GOOD;
2735 2731
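
The replaced branches folded two special cases (device fault vs. plain error) into one: if ac_err_mask() reports anything, hand the raw FIS back as SAS_PROTO_RESPONSE and let libata decode it. Roughly what that helper checks, paraphrased from linux/libata.h with stand-in return codes:

#include <linux/ata.h>

static unsigned int example_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return 1;	/* stand-in for AC_ERR_HSM */
	if (status & (ATA_ERR | ATA_DF))
		return 2;	/* stand-in for AC_ERR_DEV */
	return 0;		/* clean completion */
}
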
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 14c1c8f6a95e..680bf6f0ce76 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -490,5 +490,6 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
490 iscsi_boot_remove_kobj(boot_kobj); 490 iscsi_boot_remove_kobj(boot_kobj);
491 491
492 kset_unregister(boot_kset->kset); 492 kset_unregister(boot_kset->kset);
493 kfree(boot_kset);
493} 494}
494EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset); 495EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
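
The added kfree() closes a small leak: iscsi_boot_create_kset() kzalloc()s the container, and unregistering the embedded kset never freed it. A sketch of the balanced create/destroy pair with hypothetical names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_boot_kset {
	struct kset *kset;
};

static struct example_boot_kset *example_create_kset(const char *name)
{
	struct example_boot_kset *bk = kzalloc(sizeof(*bk), GFP_KERNEL);

	if (!bk)
		return NULL;
	bk->kset = kset_create_and_add(name, NULL, firmware_kobj);
	if (!bk->kset) {
		kfree(bk);
		return NULL;
	}
	return bk;
}

static void example_destroy_kset(struct example_boot_kset *bk)
{
	kset_unregister(bk->kset);
	kfree(bk);		/* previously missing */
}
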
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index add6d1566ec8..bfb6d07d87f0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -593,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
593 iscsi_sw_tcp_conn_restore_callbacks(conn); 593 iscsi_sw_tcp_conn_restore_callbacks(conn);
594 sock_put(sock->sk); 594 sock_put(sock->sk);
595 595
596 spin_lock_bh(&session->lock); 596 spin_lock_bh(&session->frwd_lock);
597 tcp_sw_conn->sock = NULL; 597 tcp_sw_conn->sock = NULL;
598 spin_unlock_bh(&session->lock); 598 spin_unlock_bh(&session->frwd_lock);
599 sockfd_put(sock); 599 sockfd_put(sock);
600} 600}
601 601
@@ -663,10 +663,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
663 if (err) 663 if (err)
664 goto free_socket; 664 goto free_socket;
665 665
666 spin_lock_bh(&session->lock); 666 spin_lock_bh(&session->frwd_lock);
667 /* bind iSCSI connection and socket */ 667 /* bind iSCSI connection and socket */
668 tcp_sw_conn->sock = sock; 668 tcp_sw_conn->sock = sock;
669 spin_unlock_bh(&session->lock); 669 spin_unlock_bh(&session->frwd_lock);
670 670
671 /* setup Socket parameters */ 671 /* setup Socket parameters */
672 sk = sock->sk; 672 sk = sock->sk;
@@ -726,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
726 switch(param) { 726 switch(param) {
727 case ISCSI_PARAM_CONN_PORT: 727 case ISCSI_PARAM_CONN_PORT:
728 case ISCSI_PARAM_CONN_ADDRESS: 728 case ISCSI_PARAM_CONN_ADDRESS:
729 spin_lock_bh(&conn->session->lock); 729 spin_lock_bh(&conn->session->frwd_lock);
730 if (!tcp_sw_conn || !tcp_sw_conn->sock) { 730 if (!tcp_sw_conn || !tcp_sw_conn->sock) {
731 spin_unlock_bh(&conn->session->lock); 731 spin_unlock_bh(&conn->session->frwd_lock);
732 return -ENOTCONN; 732 return -ENOTCONN;
733 } 733 }
734 rc = kernel_getpeername(tcp_sw_conn->sock, 734 rc = kernel_getpeername(tcp_sw_conn->sock,
735 (struct sockaddr *)&addr, &len); 735 (struct sockaddr *)&addr, &len);
736 spin_unlock_bh(&conn->session->lock); 736 spin_unlock_bh(&conn->session->frwd_lock);
737 if (rc) 737 if (rc)
738 return rc; 738 return rc;
739 739
@@ -759,23 +759,26 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
759 759
760 switch (param) { 760 switch (param) {
761 case ISCSI_HOST_PARAM_IPADDRESS: 761 case ISCSI_HOST_PARAM_IPADDRESS:
762 spin_lock_bh(&session->lock); 762 if (!session)
763 return -ENOTCONN;
764
765 spin_lock_bh(&session->frwd_lock);
763 conn = session->leadconn; 766 conn = session->leadconn;
764 if (!conn) { 767 if (!conn) {
765 spin_unlock_bh(&session->lock); 768 spin_unlock_bh(&session->frwd_lock);
766 return -ENOTCONN; 769 return -ENOTCONN;
767 } 770 }
768 tcp_conn = conn->dd_data; 771 tcp_conn = conn->dd_data;
769 772
770 tcp_sw_conn = tcp_conn->dd_data; 773 tcp_sw_conn = tcp_conn->dd_data;
771 if (!tcp_sw_conn->sock) { 774 if (!tcp_sw_conn->sock) {
772 spin_unlock_bh(&session->lock); 775 spin_unlock_bh(&session->frwd_lock);
773 return -ENOTCONN; 776 return -ENOTCONN;
774 } 777 }
775 778
776 rc = kernel_getsockname(tcp_sw_conn->sock, 779 rc = kernel_getsockname(tcp_sw_conn->sock,
777 (struct sockaddr *)&addr, &len); 780 (struct sockaddr *)&addr, &len);
778 spin_unlock_bh(&session->lock); 781 spin_unlock_bh(&session->frwd_lock);
779 if (rc) 782 if (rc)
780 return rc; 783 return rc;
781 784
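
Every s/session->lock/session->frwd_lock/ in this file is the mechanical half of a tree-wide split: the single session lock becomes a forward (submission) lock and a back (completion) lock, so RX completions stop contending with queuecommand. A sketch of the shape only; the field names match libiscsi, the rest is illustrative (see libiscsi.c below for the interplay):

#include <linux/spinlock.h>

struct example_session {
	spinlock_t frwd_lock;	/* queuecommand, xmit, EH/TMF state */
	spinlock_t back_lock;	/* task completion, RX-side puts    */
};

static void example_session_init(struct example_session *s)
{
	spin_lock_init(&s->frwd_lock);
	spin_lock_init(&s->back_lock);
}
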
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 40462415291e..5b8605ca42fa 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -110,16 +110,8 @@ static void __iscsi_update_cmdsn(struct iscsi_session *session,
110 session->exp_cmdsn = exp_cmdsn; 110 session->exp_cmdsn = exp_cmdsn;
111 111
112 if (max_cmdsn != session->max_cmdsn && 112 if (max_cmdsn != session->max_cmdsn &&
113 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) { 113 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
114 session->max_cmdsn = max_cmdsn; 114 session->max_cmdsn = max_cmdsn;
115 /*
116 * if the window closed with IO queued, then kick the
117 * xmit thread
118 */
119 if (!list_empty(&session->leadconn->cmdqueue) ||
120 !list_empty(&session->leadconn->mgmtqueue))
121 iscsi_conn_queue_work(session->leadconn);
122 }
123} 115}
124 116
125void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) 117void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
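
The pruned window check still guards max_cmdsn updates with iscsi_sna_lt(), serial-number arithmetic (RFC 1982 style) on the 32-bit CmdSN space. A paraphrase of that comparison, under the assumption it matches libiscsi's helper:

#include <linux/types.h>

/* "n1 precedes n2" under 32-bit wrap-around arithmetic, so a
 * max_cmdsn that has wrapped past zero still compares as newer.
 */
static bool example_sna_lt(u32 n1, u32 n2)
{
	return n1 != n2 && ((n1 < n2 && (n2 - n1) < 0x80000000U) ||
			    (n1 > n2 && (n2 - n1) < 0x80000000U));
}
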
@@ -481,7 +473,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
481 * iscsi_free_task - free a task 473 * iscsi_free_task - free a task
482 * @task: iscsi cmd task 474 * @task: iscsi cmd task
483 * 475 *
484 * Must be called with session lock. 476 * Must be called with session back_lock.
485 * This function returns the scsi command to scsi-ml or cleans 477 * This function returns the scsi command to scsi-ml or cleans
486 * up mgmt tasks then returns the task to the pool. 478 * up mgmt tasks then returns the task to the pool.
487 */ 479 */
@@ -535,9 +527,10 @@ void iscsi_put_task(struct iscsi_task *task)
535{ 527{
536 struct iscsi_session *session = task->conn->session; 528 struct iscsi_session *session = task->conn->session;
537 529
538 spin_lock_bh(&session->lock); 530 /* regular RX path uses back_lock */
531 spin_lock_bh(&session->back_lock);
539 __iscsi_put_task(task); 532 __iscsi_put_task(task);
540 spin_unlock_bh(&session->lock); 533 spin_unlock_bh(&session->back_lock);
541} 534}
542EXPORT_SYMBOL_GPL(iscsi_put_task); 535EXPORT_SYMBOL_GPL(iscsi_put_task);
543 536
@@ -546,7 +539,7 @@ EXPORT_SYMBOL_GPL(iscsi_put_task);
546 * @task: iscsi cmd task 539 * @task: iscsi cmd task
547 * @state: state to complete task with 540 * @state: state to complete task with
548 * 541 *
549 * Must be called with session lock. 542 * Must be called with session back_lock.
550 */ 543 */
551static void iscsi_complete_task(struct iscsi_task *task, int state) 544static void iscsi_complete_task(struct iscsi_task *task, int state)
552{ 545{
@@ -585,7 +578,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
585 * This is used when drivers do not need or cannot perform 578 * This is used when drivers do not need or cannot perform
586 * lower level pdu processing. 579 * lower level pdu processing.
587 * 580 *
588 * Called with session lock 581 * Called with session back_lock
589 */ 582 */
590void iscsi_complete_scsi_task(struct iscsi_task *task, 583void iscsi_complete_scsi_task(struct iscsi_task *task,
591 uint32_t exp_cmdsn, uint32_t max_cmdsn) 584 uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -602,7 +595,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
602 595
603 596
604/* 597/*
605 * session lock must be held and if not called for a task that is 598 * session back_lock must be held and if not called for a task that is
606 * still pending or from the xmit thread, then xmit thread must 599 * still pending or from the xmit thread, then xmit thread must
607 * be suspended. 600 * be suspended.
608 */ 601 */
@@ -642,7 +635,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
642 scsi_in(sc)->resid = scsi_in(sc)->length; 635 scsi_in(sc)->resid = scsi_in(sc)->length;
643 } 636 }
644 637
638 /* regular RX path uses back_lock */
639 spin_lock_bh(&conn->session->back_lock);
645 iscsi_complete_task(task, state); 640 iscsi_complete_task(task, state);
641 spin_unlock_bh(&conn->session->back_lock);
646} 642}
647 643
648static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 644static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -780,7 +776,10 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
780 return task; 776 return task;
781 777
782free_task: 778free_task:
779 /* regular RX path uses back_lock */
780 spin_lock_bh(&session->back_lock);
783 __iscsi_put_task(task); 781 __iscsi_put_task(task);
782 spin_unlock_bh(&session->back_lock);
784 return NULL; 783 return NULL;
785} 784}
786 785
@@ -791,10 +790,10 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
791 struct iscsi_session *session = conn->session; 790 struct iscsi_session *session = conn->session;
792 int err = 0; 791 int err = 0;
793 792
794 spin_lock_bh(&session->lock); 793 spin_lock_bh(&session->frwd_lock);
795 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) 794 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
796 err = -EPERM; 795 err = -EPERM;
797 spin_unlock_bh(&session->lock); 796 spin_unlock_bh(&session->frwd_lock);
798 return err; 797 return err;
799} 798}
800EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); 799EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1013,13 +1012,13 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1013 iscsi_conn_printk(KERN_ERR, conn, 1012 iscsi_conn_printk(KERN_ERR, conn,
1014 "pdu (op 0x%x itt 0x%x) rejected " 1013 "pdu (op 0x%x itt 0x%x) rejected "
1015 "due to DataDigest error.\n", 1014 "due to DataDigest error.\n",
1016 rejected_pdu.itt, opcode); 1015 opcode, rejected_pdu.itt);
1017 break; 1016 break;
1018 case ISCSI_REASON_IMM_CMD_REJECT: 1017 case ISCSI_REASON_IMM_CMD_REJECT:
1019 iscsi_conn_printk(KERN_ERR, conn, 1018 iscsi_conn_printk(KERN_ERR, conn,
1020 "pdu (op 0x%x itt 0x%x) rejected. Too many " 1019 "pdu (op 0x%x itt 0x%x) rejected. Too many "
1021 "immediate commands.\n", 1020 "immediate commands.\n",
1022 rejected_pdu.itt, opcode); 1021 opcode, rejected_pdu.itt);
1023 /* 1022 /*
1024 * We only send one TMF at a time so if the target could not 1023 * We only send one TMF at a time so if the target could not
1025 * handle it, then it should get fixed (RFC mandates that 1024 * handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1030,19 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1031 if (opcode != ISCSI_OP_NOOP_OUT) 1030 if (opcode != ISCSI_OP_NOOP_OUT)
1032 return 0; 1031 return 0;
1033 1032
1034 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) 1033 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
1035 /* 1034 /*
1036 * nop-out in response to target's nop-out rejected. 1035 * nop-out in response to target's nop-out rejected.
1037 * Just resend. 1036 * Just resend.
1038 */ 1037 */
1038 /* In RX path we are under back lock */
1039 spin_unlock(&conn->session->back_lock);
1040 spin_lock(&conn->session->frwd_lock);
1039 iscsi_send_nopout(conn, 1041 iscsi_send_nopout(conn,
1040 (struct iscsi_nopin*)&rejected_pdu); 1042 (struct iscsi_nopin*)&rejected_pdu);
1041 else { 1043 spin_unlock(&conn->session->frwd_lock);
1044 spin_lock(&conn->session->back_lock);
1045 } else {
1042 struct iscsi_task *task; 1046 struct iscsi_task *task;
1043 /* 1047 /*
1044 * Our nop as ping got dropped. We know the target 1048 * Our nop as ping got dropped. We know the target
@@ -1059,8 +1063,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1059 default: 1063 default:
1060 iscsi_conn_printk(KERN_ERR, conn, 1064 iscsi_conn_printk(KERN_ERR, conn,
1061 "pdu (op 0x%x itt 0x%x) rejected. Reason " 1065 "pdu (op 0x%x itt 0x%x) rejected. Reason "
1062 "code 0x%x\n", rejected_pdu.itt, 1066 "code 0x%x\n", rejected_pdu.opcode,
1063 rejected_pdu.opcode, reject->reason); 1067 rejected_pdu.itt, reject->reason);
1064 break; 1068 break;
1065 } 1069 }
1066 return rc; 1070 return rc;
@@ -1074,7 +1078,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1074 * This should be used for mgmt tasks like login and nops, or if 1078 * This should be used for mgmt tasks like login and nops, or if
1075 * the LDD's itt space does not include the session age. 1079 * the LDD's itt space does not include the session age.
1076 * 1080 *
1077 * The session lock must be held. 1081 * The session back_lock must be held.
1078 */ 1082 */
1079struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 1083struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
1080{ 1084{
@@ -1103,7 +1107,7 @@ EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
1103 * @datalen: len of data buffer 1107 * @datalen: len of data buffer
1104 * 1108 *
1105 * Completes pdu processing by freeing any resources allocated at 1109 * Completes pdu processing by freeing any resources allocated at
1106 * queuecommand or send generic. session lock must be held and verify 1110 * queuecommand or send generic. session back_lock must be held and verify
1107 * itt must have been called. 1111 * itt must have been called.
1108 */ 1112 */
1109int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 1113int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -1140,7 +1144,12 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1140 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) 1144 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
1141 break; 1145 break;
1142 1146
1147 /* In RX path we are under back lock */
1148 spin_unlock(&session->back_lock);
1149 spin_lock(&session->frwd_lock);
1143 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); 1150 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
1151 spin_unlock(&session->frwd_lock);
1152 spin_lock(&session->back_lock);
1144 break; 1153 break;
1145 case ISCSI_OP_REJECT: 1154 case ISCSI_OP_REJECT:
1146 rc = iscsi_handle_reject(conn, hdr, data, datalen); 1155 rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -1247,9 +1256,9 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1247{ 1256{
1248 int rc; 1257 int rc;
1249 1258
1250 spin_lock(&conn->session->lock); 1259 spin_lock(&conn->session->back_lock);
1251 rc = __iscsi_complete_pdu(conn, hdr, data, datalen); 1260 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
1252 spin_unlock(&conn->session->lock); 1261 spin_unlock(&conn->session->back_lock);
1253 return rc; 1262 return rc;
1254} 1263}
1255EXPORT_SYMBOL_GPL(iscsi_complete_pdu); 1264EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
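
These RX-path hunks all follow one rule: back_lock may be taken while frwd_lock is held (iscsi_xmit_task below does exactly that), never the reverse, so code already running under back_lock must fully step out of it before sending a nop-out under frwd_lock. A sketch of the stepping idiom, assuming the two locks are passed in:

#include <linux/spinlock.h>

/* Entered with back_lock held (RX completion context). */
static void example_tx_from_rx(spinlock_t *back_lock,
			       spinlock_t *frwd_lock)
{
	spin_unlock(back_lock);		/* never nest back -> frwd  */

	spin_lock(frwd_lock);
	/* ... queue the nop-out or other TX-side work here ... */
	spin_unlock(frwd_lock);

	spin_lock(back_lock);		/* caller resumes RX decode */
}
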
@@ -1293,7 +1302,7 @@ EXPORT_SYMBOL_GPL(iscsi_verify_itt);
1293 * 1302 *
1294 * This should be used for cmd tasks. 1303 * This should be used for cmd tasks.
1295 * 1304 *
1296 * The session lock must be held. 1305 * The session back_lock must be held.
1297 */ 1306 */
1298struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) 1307struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
1299{ 1308{
@@ -1323,15 +1332,15 @@ void iscsi_session_failure(struct iscsi_session *session,
1323 struct iscsi_conn *conn; 1332 struct iscsi_conn *conn;
1324 struct device *dev; 1333 struct device *dev;
1325 1334
1326 spin_lock_bh(&session->lock); 1335 spin_lock_bh(&session->frwd_lock);
1327 conn = session->leadconn; 1336 conn = session->leadconn;
1328 if (session->state == ISCSI_STATE_TERMINATE || !conn) { 1337 if (session->state == ISCSI_STATE_TERMINATE || !conn) {
1329 spin_unlock_bh(&session->lock); 1338 spin_unlock_bh(&session->frwd_lock);
1330 return; 1339 return;
1331 } 1340 }
1332 1341
1333 dev = get_device(&conn->cls_conn->dev); 1342 dev = get_device(&conn->cls_conn->dev);
1334 spin_unlock_bh(&session->lock); 1343 spin_unlock_bh(&session->frwd_lock);
1335 if (!dev) 1344 if (!dev)
1336 return; 1345 return;
1337 /* 1346 /*
@@ -1351,15 +1360,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
1351{ 1360{
1352 struct iscsi_session *session = conn->session; 1361 struct iscsi_session *session = conn->session;
1353 1362
1354 spin_lock_bh(&session->lock); 1363 spin_lock_bh(&session->frwd_lock);
1355 if (session->state == ISCSI_STATE_FAILED) { 1364 if (session->state == ISCSI_STATE_FAILED) {
1356 spin_unlock_bh(&session->lock); 1365 spin_unlock_bh(&session->frwd_lock);
1357 return; 1366 return;
1358 } 1367 }
1359 1368
1360 if (conn->stop_stage == 0) 1369 if (conn->stop_stage == 0)
1361 session->state = ISCSI_STATE_FAILED; 1370 session->state = ISCSI_STATE_FAILED;
1362 spin_unlock_bh(&session->lock); 1371 spin_unlock_bh(&session->frwd_lock);
1363 1372
1364 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1373 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1365 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 1374 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1393,15 +1402,18 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1393 return -ENODATA; 1402 return -ENODATA;
1394 1403
1395 __iscsi_get_task(task); 1404 __iscsi_get_task(task);
1396 spin_unlock_bh(&conn->session->lock); 1405 spin_unlock_bh(&conn->session->frwd_lock);
1397 rc = conn->session->tt->xmit_task(task); 1406 rc = conn->session->tt->xmit_task(task);
1398 spin_lock_bh(&conn->session->lock); 1407 spin_lock_bh(&conn->session->frwd_lock);
1399 if (!rc) { 1408 if (!rc) {
1400 /* done with this task */ 1409 /* done with this task */
1401 task->last_xfer = jiffies; 1410 task->last_xfer = jiffies;
1402 conn->task = NULL; 1411 conn->task = NULL;
1403 } 1412 }
1413 /* regular RX path uses back_lock */
1414 spin_lock_bh(&conn->session->back_lock);
1404 __iscsi_put_task(task); 1415 __iscsi_put_task(task);
1416 spin_unlock_bh(&conn->session->back_lock);
1405 return rc; 1417 return rc;
1406} 1418}
1407 1419
@@ -1410,7 +1422,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1410 * @task: task to requeue 1422 * @task: task to requeue
1411 * 1423 *
1412 * LLDs that need to run a task from the session workqueue should call 1424 * LLDs that need to run a task from the session workqueue should call
1413 * this. The session lock must be held. This should only be called 1425 * this. The session frwd_lock must be held. This should only be called
1414 * by software drivers. 1426 * by software drivers.
1415 */ 1427 */
1416void iscsi_requeue_task(struct iscsi_task *task) 1428void iscsi_requeue_task(struct iscsi_task *task)
@@ -1441,10 +1453,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1441 struct iscsi_task *task; 1453 struct iscsi_task *task;
1442 int rc = 0; 1454 int rc = 0;
1443 1455
1444 spin_lock_bh(&conn->session->lock); 1456 spin_lock_bh(&conn->session->frwd_lock);
1445 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { 1457 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1446 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); 1458 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1447 spin_unlock_bh(&conn->session->lock); 1459 spin_unlock_bh(&conn->session->frwd_lock);
1448 return -ENODATA; 1460 return -ENODATA;
1449 } 1461 }
1450 1462
@@ -1465,7 +1477,10 @@ check_mgmt:
1465 struct iscsi_task, running); 1477 struct iscsi_task, running);
1466 list_del_init(&conn->task->running); 1478 list_del_init(&conn->task->running);
1467 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1479 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1480 /* regular RX path uses back_lock */
1481 spin_lock_bh(&conn->session->back_lock);
1468 __iscsi_put_task(conn->task); 1482 __iscsi_put_task(conn->task);
1483 spin_unlock_bh(&conn->session->back_lock);
1469 conn->task = NULL; 1484 conn->task = NULL;
1470 continue; 1485 continue;
1471 } 1486 }
@@ -1527,11 +1542,11 @@ check_mgmt:
1527 if (!list_empty(&conn->mgmtqueue)) 1542 if (!list_empty(&conn->mgmtqueue))
1528 goto check_mgmt; 1543 goto check_mgmt;
1529 } 1544 }
1530 spin_unlock_bh(&conn->session->lock); 1545 spin_unlock_bh(&conn->session->frwd_lock);
1531 return -ENODATA; 1546 return -ENODATA;
1532 1547
1533done: 1548done:
1534 spin_unlock_bh(&conn->session->lock); 1549 spin_unlock_bh(&conn->session->frwd_lock);
1535 return rc; 1550 return rc;
1536} 1551}
1537 1552
@@ -1600,7 +1615,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1600 1615
1601 cls_session = starget_to_session(scsi_target(sc->device)); 1616 cls_session = starget_to_session(scsi_target(sc->device));
1602 session = cls_session->dd_data; 1617 session = cls_session->dd_data;
1603 spin_lock_bh(&session->lock); 1618 spin_lock_bh(&session->frwd_lock);
1604 1619
1605 reason = iscsi_session_chkready(cls_session); 1620 reason = iscsi_session_chkready(cls_session);
1606 if (reason) { 1621 if (reason) {
@@ -1686,13 +1701,13 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1686 } 1701 }
1687 1702
1688 session->queued_cmdsn++; 1703 session->queued_cmdsn++;
1689 spin_unlock_bh(&session->lock); 1704 spin_unlock_bh(&session->frwd_lock);
1690 return 0; 1705 return 0;
1691 1706
1692prepd_reject: 1707prepd_reject:
1693 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); 1708 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1694reject: 1709reject:
1695 spin_unlock_bh(&session->lock); 1710 spin_unlock_bh(&session->frwd_lock);
1696 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1711 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
1697 sc->cmnd[0], reason); 1712 sc->cmnd[0], reason);
1698 return SCSI_MLQUEUE_TARGET_BUSY; 1713 return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1700,7 +1715,7 @@ reject:
1700prepd_fault: 1715prepd_fault:
1701 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); 1716 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1702fault: 1717fault:
1703 spin_unlock_bh(&session->lock); 1718 spin_unlock_bh(&session->frwd_lock);
1704 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1719 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
1705 sc->cmnd[0], reason); 1720 sc->cmnd[0], reason);
1706 if (!scsi_bidi_cmnd(sc)) 1721 if (!scsi_bidi_cmnd(sc))
@@ -1748,14 +1763,14 @@ static void iscsi_tmf_timedout(unsigned long data)
1748 struct iscsi_conn *conn = (struct iscsi_conn *)data; 1763 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1749 struct iscsi_session *session = conn->session; 1764 struct iscsi_session *session = conn->session;
1750 1765
1751 spin_lock(&session->lock); 1766 spin_lock(&session->frwd_lock);
1752 if (conn->tmf_state == TMF_QUEUED) { 1767 if (conn->tmf_state == TMF_QUEUED) {
1753 conn->tmf_state = TMF_TIMEDOUT; 1768 conn->tmf_state = TMF_TIMEDOUT;
1754 ISCSI_DBG_EH(session, "tmf timedout\n"); 1769 ISCSI_DBG_EH(session, "tmf timedout\n");
1755 /* unblock eh_abort() */ 1770 /* unblock eh_abort() */
1756 wake_up(&conn->ehwait); 1771 wake_up(&conn->ehwait);
1757 } 1772 }
1758 spin_unlock(&session->lock); 1773 spin_unlock(&session->frwd_lock);
1759} 1774}
1760 1775
1761static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, 1776static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
@@ -1768,10 +1783,10 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1768 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, 1783 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1769 NULL, 0); 1784 NULL, 0);
1770 if (!task) { 1785 if (!task) {
1771 spin_unlock_bh(&session->lock); 1786 spin_unlock_bh(&session->frwd_lock);
1772 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n"); 1787 iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
1773 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1788 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1774 spin_lock_bh(&session->lock); 1789 spin_lock_bh(&session->frwd_lock);
1775 return -EPERM; 1790 return -EPERM;
1776 } 1791 }
1777 conn->tmfcmd_pdus_cnt++; 1792 conn->tmfcmd_pdus_cnt++;
@@ -1781,7 +1796,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1781 add_timer(&conn->tmf_timer); 1796 add_timer(&conn->tmf_timer);
1782 ISCSI_DBG_EH(session, "tmf set timeout\n"); 1797 ISCSI_DBG_EH(session, "tmf set timeout\n");
1783 1798
1784 spin_unlock_bh(&session->lock); 1799 spin_unlock_bh(&session->frwd_lock);
1785 mutex_unlock(&session->eh_mutex); 1800 mutex_unlock(&session->eh_mutex);
1786 1801
1787 /* 1802 /*
@@ -1800,7 +1815,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1800 del_timer_sync(&conn->tmf_timer); 1815 del_timer_sync(&conn->tmf_timer);
1801 1816
1802 mutex_lock(&session->eh_mutex); 1817 mutex_lock(&session->eh_mutex);
1803 spin_lock_bh(&session->lock); 1818 spin_lock_bh(&session->frwd_lock);
1804 /* if the session drops it will clean up the task */ 1819 /* if the session drops it will clean up the task */
1805 if (age != session->age || 1820 if (age != session->age ||
1806 session->state != ISCSI_STATE_LOGGED_IN) 1821 session->state != ISCSI_STATE_LOGGED_IN)
@@ -1837,7 +1852,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1837 * iscsi_suspend_queue - suspend iscsi_queuecommand 1852 * iscsi_suspend_queue - suspend iscsi_queuecommand
1838 * @conn: iscsi conn to stop queueing IO on 1853 * @conn: iscsi conn to stop queueing IO on
1839 * 1854 *
1840 * This grabs the session lock to make sure no one is in 1855 * This grabs the session frwd_lock to make sure no one is in
1841 * xmit_task/queuecommand, and then sets suspend to prevent 1856 * xmit_task/queuecommand, and then sets suspend to prevent
1842 * new commands from being queued. This only needs to be called 1857 * new commands from being queued. This only needs to be called
1843 * by offload drivers that need to sync a path like ep disconnect 1858 * by offload drivers that need to sync a path like ep disconnect
@@ -1846,9 +1861,9 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1846 */ 1861 */
1847void iscsi_suspend_queue(struct iscsi_conn *conn) 1862void iscsi_suspend_queue(struct iscsi_conn *conn)
1848{ 1863{
1849 spin_lock_bh(&conn->session->lock); 1864 spin_lock_bh(&conn->session->frwd_lock);
1850 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1865 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1851 spin_unlock_bh(&conn->session->lock); 1866 spin_unlock_bh(&conn->session->frwd_lock);
1852} 1867}
1853EXPORT_SYMBOL_GPL(iscsi_suspend_queue); 1868EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1854 1869
@@ -1907,7 +1922,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1907 1922
1908 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); 1923 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1909 1924
1910 spin_lock(&session->lock); 1925 spin_lock(&session->frwd_lock);
1911 task = (struct iscsi_task *)sc->SCp.ptr; 1926 task = (struct iscsi_task *)sc->SCp.ptr;
1912 if (!task) { 1927 if (!task) {
1913 /* 1928 /*
@@ -2021,7 +2036,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
2021done: 2036done:
2022 if (task) 2037 if (task)
2023 task->last_timeout = jiffies; 2038 task->last_timeout = jiffies;
2024 spin_unlock(&session->lock); 2039 spin_unlock(&session->frwd_lock);
2025 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 2040 ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
2026 "timer reset" : "nh"); 2041 "timer reset" : "nh");
2027 return rc; 2042 return rc;
@@ -2033,7 +2048,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
2033 struct iscsi_session *session = conn->session; 2048 struct iscsi_session *session = conn->session;
2034 unsigned long recv_timeout, next_timeout = 0, last_recv; 2049 unsigned long recv_timeout, next_timeout = 0, last_recv;
2035 2050
2036 spin_lock(&session->lock); 2051 spin_lock(&session->frwd_lock);
2037 if (session->state != ISCSI_STATE_LOGGED_IN) 2052 if (session->state != ISCSI_STATE_LOGGED_IN)
2038 goto done; 2053 goto done;
2039 2054
@@ -2050,7 +2065,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
2050 "last ping %lu, now %lu\n", 2065 "last ping %lu, now %lu\n",
2051 conn->ping_timeout, conn->recv_timeout, 2066 conn->ping_timeout, conn->recv_timeout,
2052 last_recv, conn->last_ping, jiffies); 2067 last_recv, conn->last_ping, jiffies);
2053 spin_unlock(&session->lock); 2068 spin_unlock(&session->frwd_lock);
2054 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 2069 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2055 return; 2070 return;
2056 } 2071 }
@@ -2066,7 +2081,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
2066 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout); 2081 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
2067 mod_timer(&conn->transport_timer, next_timeout); 2082 mod_timer(&conn->transport_timer, next_timeout);
2068done: 2083done:
2069 spin_unlock(&session->lock); 2084 spin_unlock(&session->frwd_lock);
2070} 2085}
2071 2086
2072static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, 2087static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
@@ -2096,7 +2111,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2096 ISCSI_DBG_EH(session, "aborting sc %p\n", sc); 2111 ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
2097 2112
2098 mutex_lock(&session->eh_mutex); 2113 mutex_lock(&session->eh_mutex);
2099 spin_lock_bh(&session->lock); 2114 spin_lock_bh(&session->frwd_lock);
2100 /* 2115 /*
2101 * if session was ISCSI_STATE_IN_RECOVERY then we may not have 2116 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
2102 * got the command. 2117 * got the command.
@@ -2104,7 +2119,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2104 if (!sc->SCp.ptr) { 2119 if (!sc->SCp.ptr) {
2105 ISCSI_DBG_EH(session, "sc never reached iscsi layer or " 2120 ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
2106 "it completed.\n"); 2121 "it completed.\n");
2107 spin_unlock_bh(&session->lock); 2122 spin_unlock_bh(&session->frwd_lock);
2108 mutex_unlock(&session->eh_mutex); 2123 mutex_unlock(&session->eh_mutex);
2109 return SUCCESS; 2124 return SUCCESS;
2110 } 2125 }
@@ -2115,7 +2130,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2115 */ 2130 */
2116 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || 2131 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
2117 sc->SCp.phase != session->age) { 2132 sc->SCp.phase != session->age) {
2118 spin_unlock_bh(&session->lock); 2133 spin_unlock_bh(&session->frwd_lock);
2119 mutex_unlock(&session->eh_mutex); 2134 mutex_unlock(&session->eh_mutex);
2120 ISCSI_DBG_EH(session, "failing abort due to dropped " 2135 ISCSI_DBG_EH(session, "failing abort due to dropped "
2121 "session.\n"); 2136 "session.\n");
@@ -2156,7 +2171,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2156 2171
2157 switch (conn->tmf_state) { 2172 switch (conn->tmf_state) {
2158 case TMF_SUCCESS: 2173 case TMF_SUCCESS:
2159 spin_unlock_bh(&session->lock); 2174 spin_unlock_bh(&session->frwd_lock);
2160 /* 2175 /*
2161 * stop tx side in case the target had sent an abort rsp but 2176 * stop tx side in case the target had sent an abort rsp but
2162 * the initiator was still writing out data. 2177 * the initiator was still writing out data.
@@ -2167,15 +2182,15 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2167 * good and have never sent us a successful tmf response 2182 * good and have never sent us a successful tmf response
2168 * then sent more data for the cmd. 2183 * then sent more data for the cmd.
2169 */ 2184 */
2170 spin_lock_bh(&session->lock); 2185 spin_lock_bh(&session->frwd_lock);
2171 fail_scsi_task(task, DID_ABORT); 2186 fail_scsi_task(task, DID_ABORT);
2172 conn->tmf_state = TMF_INITIAL; 2187 conn->tmf_state = TMF_INITIAL;
2173 memset(hdr, 0, sizeof(*hdr)); 2188 memset(hdr, 0, sizeof(*hdr));
2174 spin_unlock_bh(&session->lock); 2189 spin_unlock_bh(&session->frwd_lock);
2175 iscsi_start_tx(conn); 2190 iscsi_start_tx(conn);
2176 goto success_unlocked; 2191 goto success_unlocked;
2177 case TMF_TIMEDOUT: 2192 case TMF_TIMEDOUT:
2178 spin_unlock_bh(&session->lock); 2193 spin_unlock_bh(&session->frwd_lock);
2179 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2194 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2180 goto failed_unlocked; 2195 goto failed_unlocked;
2181 case TMF_NOT_FOUND: 2196 case TMF_NOT_FOUND:
@@ -2194,7 +2209,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
2194 } 2209 }
2195 2210
2196success: 2211success:
2197 spin_unlock_bh(&session->lock); 2212 spin_unlock_bh(&session->frwd_lock);
2198success_unlocked: 2213success_unlocked:
2199 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", 2214 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
2200 sc, task->itt); 2215 sc, task->itt);
@@ -2202,7 +2217,7 @@ success_unlocked:
2202 return SUCCESS; 2217 return SUCCESS;
2203 2218
2204failed: 2219failed:
2205 spin_unlock_bh(&session->lock); 2220 spin_unlock_bh(&session->frwd_lock);
2206failed_unlocked: 2221failed_unlocked:
2207 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, 2222 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
2208 task ? task->itt : 0); 2223 task ? task->itt : 0);
@@ -2235,7 +2250,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2235 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); 2250 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
2236 2251
2237 mutex_lock(&session->eh_mutex); 2252 mutex_lock(&session->eh_mutex);
2238 spin_lock_bh(&session->lock); 2253 spin_lock_bh(&session->frwd_lock);
2239 /* 2254 /*
2240 * Just check if we are not logged in. We cannot check for 2255 * Just check if we are not logged in. We cannot check for
2241 * the phase because the reset could come from an ioctl. 2256 * the phase because the reset could come from an ioctl.
@@ -2262,7 +2277,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2262 case TMF_SUCCESS: 2277 case TMF_SUCCESS:
2263 break; 2278 break;
2264 case TMF_TIMEDOUT: 2279 case TMF_TIMEDOUT:
2265 spin_unlock_bh(&session->lock); 2280 spin_unlock_bh(&session->frwd_lock);
2266 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2281 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2267 goto done; 2282 goto done;
2268 default: 2283 default:
@@ -2271,21 +2286,21 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
2271 } 2286 }
2272 2287
2273 rc = SUCCESS; 2288 rc = SUCCESS;
2274 spin_unlock_bh(&session->lock); 2289 spin_unlock_bh(&session->frwd_lock);
2275 2290
2276 iscsi_suspend_tx(conn); 2291 iscsi_suspend_tx(conn);
2277 2292
2278 spin_lock_bh(&session->lock); 2293 spin_lock_bh(&session->frwd_lock);
2279 memset(hdr, 0, sizeof(*hdr)); 2294 memset(hdr, 0, sizeof(*hdr));
2280 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); 2295 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
2281 conn->tmf_state = TMF_INITIAL; 2296 conn->tmf_state = TMF_INITIAL;
2282 spin_unlock_bh(&session->lock); 2297 spin_unlock_bh(&session->frwd_lock);
2283 2298
2284 iscsi_start_tx(conn); 2299 iscsi_start_tx(conn);
2285 goto done; 2300 goto done;
2286 2301
2287unlock: 2302unlock:
2288 spin_unlock_bh(&session->lock); 2303 spin_unlock_bh(&session->frwd_lock);
2289done: 2304done:
2290 ISCSI_DBG_EH(session, "dev reset result = %s\n", 2305 ISCSI_DBG_EH(session, "dev reset result = %s\n",
2291 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2306 rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2298,13 +2313,13 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
2298{ 2313{
2299 struct iscsi_session *session = cls_session->dd_data; 2314 struct iscsi_session *session = cls_session->dd_data;
2300 2315
2301 spin_lock_bh(&session->lock); 2316 spin_lock_bh(&session->frwd_lock);
2302 if (session->state != ISCSI_STATE_LOGGED_IN) { 2317 if (session->state != ISCSI_STATE_LOGGED_IN) {
2303 session->state = ISCSI_STATE_RECOVERY_FAILED; 2318 session->state = ISCSI_STATE_RECOVERY_FAILED;
2304 if (session->leadconn) 2319 if (session->leadconn)
2305 wake_up(&session->leadconn->ehwait); 2320 wake_up(&session->leadconn->ehwait);
2306 } 2321 }
2307 spin_unlock_bh(&session->lock); 2322 spin_unlock_bh(&session->frwd_lock);
2308} 2323}
2309EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); 2324EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2310 2325
@@ -2326,19 +2341,19 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2326 conn = session->leadconn; 2341 conn = session->leadconn;
2327 2342
2328 mutex_lock(&session->eh_mutex); 2343 mutex_lock(&session->eh_mutex);
2329 spin_lock_bh(&session->lock); 2344 spin_lock_bh(&session->frwd_lock);
2330 if (session->state == ISCSI_STATE_TERMINATE) { 2345 if (session->state == ISCSI_STATE_TERMINATE) {
2331failed: 2346failed:
2332 ISCSI_DBG_EH(session, 2347 ISCSI_DBG_EH(session,
2333 "failing session reset: Could not log back into " 2348 "failing session reset: Could not log back into "
2334 "%s, %s [age %d]\n", session->targetname, 2349 "%s, %s [age %d]\n", session->targetname,
2335 conn->persistent_address, session->age); 2350 conn->persistent_address, session->age);
2336 spin_unlock_bh(&session->lock); 2351 spin_unlock_bh(&session->frwd_lock);
2337 mutex_unlock(&session->eh_mutex); 2352 mutex_unlock(&session->eh_mutex);
2338 return FAILED; 2353 return FAILED;
2339 } 2354 }
2340 2355
2341 spin_unlock_bh(&session->lock); 2356 spin_unlock_bh(&session->frwd_lock);
2342 mutex_unlock(&session->eh_mutex); 2357 mutex_unlock(&session->eh_mutex);
2343 /* 2358 /*
2344 * we drop the lock here but the leadconn cannot be destroyed while 2359 * we drop the lock here but the leadconn cannot be destroyed while
@@ -2355,14 +2370,14 @@ failed:
2355 flush_signals(current); 2370 flush_signals(current);
2356 2371
2357 mutex_lock(&session->eh_mutex); 2372 mutex_lock(&session->eh_mutex);
2358 spin_lock_bh(&session->lock); 2373 spin_lock_bh(&session->frwd_lock);
2359 if (session->state == ISCSI_STATE_LOGGED_IN) { 2374 if (session->state == ISCSI_STATE_LOGGED_IN) {
2360 ISCSI_DBG_EH(session, 2375 ISCSI_DBG_EH(session,
2361 "session reset succeeded for %s,%s\n", 2376 "session reset succeeded for %s,%s\n",
2362 session->targetname, conn->persistent_address); 2377 session->targetname, conn->persistent_address);
2363 } else 2378 } else
2364 goto failed; 2379 goto failed;
2365 spin_unlock_bh(&session->lock); 2380 spin_unlock_bh(&session->frwd_lock);
2366 mutex_unlock(&session->eh_mutex); 2381 mutex_unlock(&session->eh_mutex);
2367 return SUCCESS; 2382 return SUCCESS;
2368} 2383}
@@ -2398,7 +2413,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2398 session->targetname); 2413 session->targetname);
2399 2414
2400 mutex_lock(&session->eh_mutex); 2415 mutex_lock(&session->eh_mutex);
2401 spin_lock_bh(&session->lock); 2416 spin_lock_bh(&session->frwd_lock);
2402 /* 2417 /*
2403 * Just check if we are not logged in. We cannot check for 2418 * Just check if we are not logged in. We cannot check for
2404 * the phase because the reset could come from an ioctl. 2419 * the phase because the reset could come from an ioctl.
@@ -2425,7 +2440,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2425 case TMF_SUCCESS: 2440 case TMF_SUCCESS:
2426 break; 2441 break;
2427 case TMF_TIMEDOUT: 2442 case TMF_TIMEDOUT:
2428 spin_unlock_bh(&session->lock); 2443 spin_unlock_bh(&session->frwd_lock);
2429 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2444 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
2430 goto done; 2445 goto done;
2431 default: 2446 default:
@@ -2434,21 +2449,21 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2434 } 2449 }
2435 2450
2436 rc = SUCCESS; 2451 rc = SUCCESS;
2437 spin_unlock_bh(&session->lock); 2452 spin_unlock_bh(&session->frwd_lock);
2438 2453
2439 iscsi_suspend_tx(conn); 2454 iscsi_suspend_tx(conn);
2440 2455
2441 spin_lock_bh(&session->lock); 2456 spin_lock_bh(&session->frwd_lock);
2442 memset(hdr, 0, sizeof(*hdr)); 2457 memset(hdr, 0, sizeof(*hdr));
2443 fail_scsi_tasks(conn, -1, DID_ERROR); 2458 fail_scsi_tasks(conn, -1, DID_ERROR);
2444 conn->tmf_state = TMF_INITIAL; 2459 conn->tmf_state = TMF_INITIAL;
2445 spin_unlock_bh(&session->lock); 2460 spin_unlock_bh(&session->frwd_lock);
2446 2461
2447 iscsi_start_tx(conn); 2462 iscsi_start_tx(conn);
2448 goto done; 2463 goto done;
2449 2464
2450unlock: 2465unlock:
2451 spin_unlock_bh(&session->lock); 2466 spin_unlock_bh(&session->frwd_lock);
2452done: 2467done:
2453 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, 2468 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2454 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2469 rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2746,8 +2761,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2746 session->max_r2t = 1; 2761 session->max_r2t = 1;
2747 session->tt = iscsit; 2762 session->tt = iscsit;
2748 session->dd_data = cls_session->dd_data + sizeof(*session); 2763 session->dd_data = cls_session->dd_data + sizeof(*session);
2764
2749 mutex_init(&session->eh_mutex); 2765 mutex_init(&session->eh_mutex);
2750 spin_lock_init(&session->lock); 2766 spin_lock_init(&session->frwd_lock);
2767 spin_lock_init(&session->back_lock);
2751 2768
2752 /* initialize SCSI PDU commands pool */ 2769 /* initialize SCSI PDU commands pool */
2753 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 2770 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -2861,14 +2878,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2861 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2878 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2862 2879
2863 /* allocate login_task used for the login/text sequences */ 2880 /* allocate login_task used for the login/text sequences */
2864 spin_lock_bh(&session->lock); 2881 spin_lock_bh(&session->frwd_lock);
2865 if (!kfifo_out(&session->cmdpool.queue, 2882 if (!kfifo_out(&session->cmdpool.queue,
2866 (void*)&conn->login_task, 2883 (void*)&conn->login_task,
2867 sizeof(void*))) { 2884 sizeof(void*))) {
2868 spin_unlock_bh(&session->lock); 2885 spin_unlock_bh(&session->frwd_lock);
2869 goto login_task_alloc_fail; 2886 goto login_task_alloc_fail;
2870 } 2887 }
2871 spin_unlock_bh(&session->lock); 2888 spin_unlock_bh(&session->frwd_lock);
2872 2889
2873 data = (char *) __get_free_pages(GFP_KERNEL, 2890 data = (char *) __get_free_pages(GFP_KERNEL,
2874 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 2891 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -2905,7 +2922,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2905 2922
2906 del_timer_sync(&conn->transport_timer); 2923 del_timer_sync(&conn->transport_timer);
2907 2924
2908 spin_lock_bh(&session->lock); 2925 spin_lock_bh(&session->frwd_lock);
2909 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2926 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2910 if (session->leadconn == conn) { 2927 if (session->leadconn == conn) {
2911 /* 2928 /*
@@ -2914,7 +2931,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2914 session->state = ISCSI_STATE_TERMINATE; 2931 session->state = ISCSI_STATE_TERMINATE;
2915 wake_up(&conn->ehwait); 2932 wake_up(&conn->ehwait);
2916 } 2933 }
2917 spin_unlock_bh(&session->lock); 2934 spin_unlock_bh(&session->frwd_lock);
2918 2935
2919 /* 2936 /*
2920 * Block until all in-progress commands for this connection 2937 * Block until all in-progress commands for this connection
@@ -2941,16 +2958,19 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2941 /* flush queued up work because we free the connection below */ 2958 /* flush queued up work because we free the connection below */
2942 iscsi_suspend_tx(conn); 2959 iscsi_suspend_tx(conn);
2943 2960
2944 spin_lock_bh(&session->lock); 2961 spin_lock_bh(&session->frwd_lock);
2945 free_pages((unsigned long) conn->data, 2962 free_pages((unsigned long) conn->data,
2946 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 2963 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2947 kfree(conn->persistent_address); 2964 kfree(conn->persistent_address);
2948 kfree(conn->local_ipaddr); 2965 kfree(conn->local_ipaddr);
2966 /* regular RX path uses back_lock */
2967 spin_lock_bh(&session->back_lock);
2949 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, 2968 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2950 sizeof(void*)); 2969 sizeof(void*));
2970 spin_unlock_bh(&session->back_lock);
2951 if (session->leadconn == conn) 2971 if (session->leadconn == conn)
2952 session->leadconn = NULL; 2972 session->leadconn = NULL;
2953 spin_unlock_bh(&session->lock); 2973 spin_unlock_bh(&session->frwd_lock);
2954 2974
2955 iscsi_destroy_conn(cls_conn); 2975 iscsi_destroy_conn(cls_conn);
2956} 2976}
@@ -2987,7 +3007,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2987 conn->ping_timeout = 5; 3007 conn->ping_timeout = 5;
2988 } 3008 }
2989 3009
2990 spin_lock_bh(&session->lock); 3010 spin_lock_bh(&session->frwd_lock);
2991 conn->c_stage = ISCSI_CONN_STARTED; 3011 conn->c_stage = ISCSI_CONN_STARTED;
2992 session->state = ISCSI_STATE_LOGGED_IN; 3012 session->state = ISCSI_STATE_LOGGED_IN;
2993 session->queued_cmdsn = session->cmdsn; 3013 session->queued_cmdsn = session->cmdsn;
@@ -3016,7 +3036,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
3016 default: 3036 default:
3017 break; 3037 break;
3018 } 3038 }
3019 spin_unlock_bh(&session->lock); 3039 spin_unlock_bh(&session->frwd_lock);
3020 3040
3021 iscsi_unblock_session(session->cls_session); 3041 iscsi_unblock_session(session->cls_session);
3022 wake_up(&conn->ehwait); 3042 wake_up(&conn->ehwait);
@@ -3055,9 +3075,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
3055 int old_stop_stage; 3075 int old_stop_stage;
3056 3076
3057 mutex_lock(&session->eh_mutex); 3077 mutex_lock(&session->eh_mutex);
3058 spin_lock_bh(&session->lock); 3078 spin_lock_bh(&session->frwd_lock);
3059 if (conn->stop_stage == STOP_CONN_TERM) { 3079 if (conn->stop_stage == STOP_CONN_TERM) {
3060 spin_unlock_bh(&session->lock); 3080 spin_unlock_bh(&session->frwd_lock);
3061 mutex_unlock(&session->eh_mutex); 3081 mutex_unlock(&session->eh_mutex);
3062 return; 3082 return;
3063 } 3083 }
@@ -3074,14 +3094,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
3074 3094
3075 old_stop_stage = conn->stop_stage; 3095 old_stop_stage = conn->stop_stage;
3076 conn->stop_stage = flag; 3096 conn->stop_stage = flag;
3077 spin_unlock_bh(&session->lock); 3097 spin_unlock_bh(&session->frwd_lock);
3078 3098
3079 del_timer_sync(&conn->transport_timer); 3099 del_timer_sync(&conn->transport_timer);
3080 iscsi_suspend_tx(conn); 3100 iscsi_suspend_tx(conn);
3081 3101
3082 spin_lock_bh(&session->lock); 3102 spin_lock_bh(&session->frwd_lock);
3083 conn->c_stage = ISCSI_CONN_STOPPED; 3103 conn->c_stage = ISCSI_CONN_STOPPED;
3084 spin_unlock_bh(&session->lock); 3104 spin_unlock_bh(&session->frwd_lock);
3085 3105
3086 /* 3106 /*
3087 * for connection level recovery we should not calculate 3107 * for connection level recovery we should not calculate
@@ -3102,11 +3122,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
3102 /* 3122 /*
3103 * flush queues. 3123 * flush queues.
3104 */ 3124 */
3105 spin_lock_bh(&session->lock); 3125 spin_lock_bh(&session->frwd_lock);
3106 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); 3126 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
3107 fail_mgmt_tasks(session, conn); 3127 fail_mgmt_tasks(session, conn);
3108 memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); 3128 memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
3109 spin_unlock_bh(&session->lock); 3129 spin_unlock_bh(&session->frwd_lock);
3110 mutex_unlock(&session->eh_mutex); 3130 mutex_unlock(&session->eh_mutex);
3111} 3131}
3112 3132
@@ -3133,10 +3153,10 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
3133 struct iscsi_session *session = cls_session->dd_data; 3153 struct iscsi_session *session = cls_session->dd_data;
3134 struct iscsi_conn *conn = cls_conn->dd_data; 3154 struct iscsi_conn *conn = cls_conn->dd_data;
3135 3155
3136 spin_lock_bh(&session->lock); 3156 spin_lock_bh(&session->frwd_lock);
3137 if (is_leading) 3157 if (is_leading)
3138 session->leadconn = conn; 3158 session->leadconn = conn;
3139 spin_unlock_bh(&session->lock); 3159 spin_unlock_bh(&session->frwd_lock);
3140 3160
3141 /* 3161 /*
3142 * Unblock xmitworker(), Login Phase will pass through. 3162 * Unblock xmitworker(), Login Phase will pass through.
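
The libiscsi hunks above split the old single session->lock into two locks: frwd_lock for the submission/teardown path (as seen in iscsi_conn_teardown, iscsi_conn_start and iscsi_start_session_recovery) and back_lock for the completion/RX path (the cmdpool return in teardown now takes back_lock, per the comment "regular RX path uses back_lock"). A minimal userspace sketch of the two-lock shape, with pthread mutexes standing in for the kernel spinlocks -- the toy_session type and its fields are illustrative, not from the driver:

/*
 * Two-lock model: submission and completion each take their own lock,
 * so RX completions no longer serialize against command submission.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_session {
	pthread_mutex_t frwd_lock;	/* protects submission-side state */
	pthread_mutex_t back_lock;	/* protects completion-side state */
	int queued_cmdsn;		/* forward-path state */
	int completed;			/* back-path state */
};

static void toy_submit(struct toy_session *s)
{
	pthread_mutex_lock(&s->frwd_lock);
	s->queued_cmdsn++;		/* like iscsi_conn_start() bumping cmdsn */
	pthread_mutex_unlock(&s->frwd_lock);
}

static void toy_complete(struct toy_session *s)
{
	pthread_mutex_lock(&s->back_lock);
	s->completed++;			/* like the RX path completing a PDU */
	pthread_mutex_unlock(&s->back_lock);
}

int main(void)
{
	struct toy_session s = {
		.frwd_lock = PTHREAD_MUTEX_INITIALIZER,
		.back_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	toy_submit(&s);
	toy_complete(&s);
	printf("queued=%d completed=%d\n", s.queued_cmdsn, s.completed);
	return 0;
}
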
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 1d58d5336018..60cb6dc3c6f0 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
446 * iscsi_tcp_cleanup_task - free tcp_task resources 446 * iscsi_tcp_cleanup_task - free tcp_task resources
447 * @task: iscsi task 447 * @task: iscsi task
448 * 448 *
449 * must be called with session lock 449 * must be called with session back_lock
450 */ 450 */
451void iscsi_tcp_cleanup_task(struct iscsi_task *task) 451void iscsi_tcp_cleanup_task(struct iscsi_task *task)
452{ 452{
@@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
457 if (!task->sc) 457 if (!task->sc)
458 return; 458 return;
459 459
460 spin_lock_bh(&tcp_task->queue2pool);
460 /* flush task's r2t queues */ 461 /* flush task's r2t queues */
461 while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { 462 while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
462 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, 463 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
@@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
470 sizeof(void*)); 471 sizeof(void*));
471 tcp_task->r2t = NULL; 472 tcp_task->r2t = NULL;
472 } 473 }
474 spin_unlock_bh(&tcp_task->queue2pool);
473} 475}
474EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task); 476EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
475 477
@@ -529,6 +531,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
529 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; 531 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
530 struct iscsi_r2t_info *r2t; 532 struct iscsi_r2t_info *r2t;
531 int r2tsn = be32_to_cpu(rhdr->r2tsn); 533 int r2tsn = be32_to_cpu(rhdr->r2tsn);
534 u32 data_length;
535 u32 data_offset;
532 int rc; 536 int rc;
533 537
534 if (tcp_conn->in.datalen) { 538 if (tcp_conn->in.datalen) {
@@ -554,40 +558,41 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
554 return 0; 558 return 0;
555 } 559 }
556 560
557 rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); 561 data_length = be32_to_cpu(rhdr->data_length);
558 if (!rc) { 562 if (data_length == 0) {
559 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
560 "Target has sent more R2Ts than it "
561 "negotiated for or driver has leaked.\n");
562 return ISCSI_ERR_PROTO;
563 }
564
565 r2t->exp_statsn = rhdr->statsn;
566 r2t->data_length = be32_to_cpu(rhdr->data_length);
567 if (r2t->data_length == 0) {
568 iscsi_conn_printk(KERN_ERR, conn, 563 iscsi_conn_printk(KERN_ERR, conn,
569 "invalid R2T with zero data len\n"); 564 "invalid R2T with zero data len\n");
570 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
571 sizeof(void*));
572 return ISCSI_ERR_DATALEN; 565 return ISCSI_ERR_DATALEN;
573 } 566 }
574 567
575 if (r2t->data_length > session->max_burst) 568 if (data_length > session->max_burst)
576 ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max " 569 ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
577 "burst %u. Attempting to execute request.\n", 570 "burst %u. Attempting to execute request.\n",
578 r2t->data_length, session->max_burst); 571 data_length, session->max_burst);
579 572
580 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 573 data_offset = be32_to_cpu(rhdr->data_offset);
581 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) { 574 if (data_offset + data_length > scsi_out(task->sc)->length) {
582 iscsi_conn_printk(KERN_ERR, conn, 575 iscsi_conn_printk(KERN_ERR, conn,
583 "invalid R2T with data len %u at offset %u " 576 "invalid R2T with data len %u at offset %u "
584 "and total length %d\n", r2t->data_length, 577 "and total length %d\n", data_length,
585 r2t->data_offset, scsi_out(task->sc)->length); 578 data_offset, scsi_out(task->sc)->length);
586 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
587 sizeof(void*));
588 return ISCSI_ERR_DATALEN; 579 return ISCSI_ERR_DATALEN;
589 } 580 }
590 581
582 spin_lock(&tcp_task->pool2queue);
583 rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
584 if (!rc) {
585 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
586 "Target has sent more R2Ts than it "
587 "negotiated for or driver has leaked.\n");
588 spin_unlock(&tcp_task->pool2queue);
589 return ISCSI_ERR_PROTO;
590 }
591
592 r2t->exp_statsn = rhdr->statsn;
593 r2t->data_length = data_length;
594 r2t->data_offset = data_offset;
595
591 r2t->ttt = rhdr->ttt; /* no flip */ 596 r2t->ttt = rhdr->ttt; /* no flip */
592 r2t->datasn = 0; 597 r2t->datasn = 0;
593 r2t->sent = 0; 598 r2t->sent = 0;
@@ -595,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
595 tcp_task->exp_datasn = r2tsn + 1; 600 tcp_task->exp_datasn = r2tsn + 1;
596 kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); 601 kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
597 conn->r2t_pdus_cnt++; 602 conn->r2t_pdus_cnt++;
603 spin_unlock(&tcp_task->pool2queue);
598 604
599 iscsi_requeue_task(task); 605 iscsi_requeue_task(task);
600 return 0; 606 return 0;
@@ -667,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
667 673
668 switch(opcode) { 674 switch(opcode) {
669 case ISCSI_OP_SCSI_DATA_IN: 675 case ISCSI_OP_SCSI_DATA_IN:
670 spin_lock(&conn->session->lock); 676 spin_lock(&conn->session->back_lock);
671 task = iscsi_itt_to_ctask(conn, hdr->itt); 677 task = iscsi_itt_to_ctask(conn, hdr->itt);
672 if (!task) 678 if (!task)
673 rc = ISCSI_ERR_BAD_ITT; 679 rc = ISCSI_ERR_BAD_ITT;
674 else 680 else
675 rc = iscsi_tcp_data_in(conn, task); 681 rc = iscsi_tcp_data_in(conn, task);
676 if (rc) { 682 if (rc) {
677 spin_unlock(&conn->session->lock); 683 spin_unlock(&conn->session->back_lock);
678 break; 684 break;
679 } 685 }
680 686
@@ -707,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
707 tcp_conn->in.datalen, 713 tcp_conn->in.datalen,
708 iscsi_tcp_process_data_in, 714 iscsi_tcp_process_data_in,
709 rx_hash); 715 rx_hash);
710 spin_unlock(&conn->session->lock); 716 spin_unlock(&conn->session->back_lock);
711 return rc; 717 return rc;
712 } 718 }
713 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 719 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
714 spin_unlock(&conn->session->lock); 720 spin_unlock(&conn->session->back_lock);
715 break; 721 break;
716 case ISCSI_OP_SCSI_CMD_RSP: 722 case ISCSI_OP_SCSI_CMD_RSP:
717 if (tcp_conn->in.datalen) { 723 if (tcp_conn->in.datalen) {
@@ -721,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
721 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 727 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
722 break; 728 break;
723 case ISCSI_OP_R2T: 729 case ISCSI_OP_R2T:
724 spin_lock(&conn->session->lock); 730 spin_lock(&conn->session->back_lock);
725 task = iscsi_itt_to_ctask(conn, hdr->itt); 731 task = iscsi_itt_to_ctask(conn, hdr->itt);
732 spin_unlock(&conn->session->back_lock);
726 if (!task) 733 if (!task)
727 rc = ISCSI_ERR_BAD_ITT; 734 rc = ISCSI_ERR_BAD_ITT;
728 else if (ahslen) 735 else if (ahslen)
729 rc = ISCSI_ERR_AHSLEN; 736 rc = ISCSI_ERR_AHSLEN;
730 else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { 737 else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
731 task->last_xfer = jiffies; 738 task->last_xfer = jiffies;
739 spin_lock(&conn->session->frwd_lock);
732 rc = iscsi_tcp_r2t_rsp(conn, task); 740 rc = iscsi_tcp_r2t_rsp(conn, task);
741 spin_unlock(&conn->session->frwd_lock);
733 } else 742 } else
734 rc = ISCSI_ERR_PROTO; 743 rc = ISCSI_ERR_PROTO;
735 spin_unlock(&conn->session->lock);
736 break; 744 break;
737 case ISCSI_OP_LOGIN_RSP: 745 case ISCSI_OP_LOGIN_RSP:
738 case ISCSI_OP_TEXT_RSP: 746 case ISCSI_OP_TEXT_RSP:
@@ -980,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
980 988
981static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) 989static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
982{ 990{
983 struct iscsi_session *session = task->conn->session;
984 struct iscsi_tcp_task *tcp_task = task->dd_data; 991 struct iscsi_tcp_task *tcp_task = task->dd_data;
985 struct iscsi_r2t_info *r2t = NULL; 992 struct iscsi_r2t_info *r2t = NULL;
986 993
987 if (iscsi_task_has_unsol_data(task)) 994 if (iscsi_task_has_unsol_data(task))
988 r2t = &task->unsol_r2t; 995 r2t = &task->unsol_r2t;
989 else { 996 else {
990 spin_lock_bh(&session->lock); 997 spin_lock_bh(&tcp_task->queue2pool);
991 if (tcp_task->r2t) { 998 if (tcp_task->r2t) {
992 r2t = tcp_task->r2t; 999 r2t = tcp_task->r2t;
993 /* Continue with this R2T? */ 1000 /* Continue with this R2T? */
@@ -1009,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
1009 else 1016 else
1010 r2t = tcp_task->r2t; 1017 r2t = tcp_task->r2t;
1011 } 1018 }
1012 spin_unlock_bh(&session->lock); 1019 spin_unlock_bh(&tcp_task->queue2pool);
1013 } 1020 }
1014 1021
1015 return r2t; 1022 return r2t;
@@ -1139,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1139 iscsi_pool_free(&tcp_task->r2tpool); 1146 iscsi_pool_free(&tcp_task->r2tpool);
1140 goto r2t_alloc_fail; 1147 goto r2t_alloc_fail;
1141 } 1148 }
1149 spin_lock_init(&tcp_task->pool2queue);
1150 spin_lock_init(&tcp_task->queue2pool);
1142 } 1151 }
1143 1152
1144 return 0; 1153 return 0;
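
In libiscsi_tcp.c the R2T free pool and per-task R2T queue move off the session lock onto two new per-task spinlocks: pool2queue covers the RX path that pops a free R2T and queues it, and queue2pool covers cleanup draining the queue back into the pool. Note also that the rewritten iscsi_tcp_r2t_rsp() validates data_length and data_offset before taking an R2T from the pool, which removes the old push-back-to-pool error paths. A rough userspace model of the two-direction handoff, with plain arrays standing in for the kernel kfifos (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define POOL_SZ 4

struct toy_task {
	pthread_mutex_t pool2queue;	/* pool -> queue direction (RX path) */
	pthread_mutex_t queue2pool;	/* queue -> pool direction (cleanup) */
	int pool[POOL_SZ], pool_n;	/* free R2T slots */
	int queue[POOL_SZ], queue_n;	/* R2Ts handed to the task */
};

/* modeled on iscsi_tcp_r2t_rsp(): allocate from pool, push on queue */
static int toy_r2t_rsp(struct toy_task *t)
{
	int rc = 0;

	pthread_mutex_lock(&t->pool2queue);
	if (t->pool_n == 0)
		rc = -1;		/* more R2Ts than negotiated */
	else
		t->queue[t->queue_n++] = t->pool[--t->pool_n];
	pthread_mutex_unlock(&t->pool2queue);
	return rc;
}

/* modeled on iscsi_tcp_cleanup_task(): drain queue back into pool */
static void toy_cleanup_task(struct toy_task *t)
{
	pthread_mutex_lock(&t->queue2pool);
	while (t->queue_n > 0)
		t->pool[t->pool_n++] = t->queue[--t->queue_n];
	pthread_mutex_unlock(&t->queue2pool);
}

int main(void)
{
	struct toy_task t = {
		.pool2queue = PTHREAD_MUTEX_INITIALIZER,
		.queue2pool = PTHREAD_MUTEX_INITIALIZER,
		.pool = { 1, 2, 3, 4 }, .pool_n = POOL_SZ,
	};

	while (toy_r2t_rsp(&t) == 0)
		;			/* exhaust the pool */
	printf("queued %d R2Ts\n", t.queue_n);
	toy_cleanup_task(&t);
	printf("pool refilled to %d\n", t.pool_n);
	return 0;
}
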
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da3aee17faa5..25d0f127424d 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -862,7 +862,7 @@ out:
862 862
863enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 863enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
864{ 864{
865 scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd); 865 scmd_dbg(cmd, "command %p timed out\n", cmd);
866 866
867 return BLK_EH_NOT_HANDLED; 867 return BLK_EH_NOT_HANDLED;
868} 868}
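
The one-line change in sas_scsi_timed_out() swaps an unconditional scmd_printk(KERN_DEBUG, ...) for scmd_dbg(), so the per-command timeout message costs nothing unless debugging is compiled in or enabled through dynamic debug. The compile-out idiom, shown as a userspace stand-in (the real macro lives in the SCSI headers and routes through dev_dbg(); this toy_scmd_dbg is illustrative only):

#include <stdio.h>

#ifdef DEBUG
#define toy_scmd_dbg(fmt, ...) fprintf(stderr, "scmd: " fmt, ##__VA_ARGS__)
#else
#define toy_scmd_dbg(fmt, ...) do { } while (0)	/* compiles away */
#endif

int main(void)
{
	toy_scmd_dbg("command %p timed out\n", (void *)0);
	return 0;
}
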
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75ca7451..94a3cafe7197 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@ struct lpfc_sli2_slim;
73 */ 73 */
74/* 1 Second */ 74/* 1 Second */
75#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) 75#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
76/* 5 minutes */
77#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
78 76
79/* Number of exchanges reserved for discovery to complete */ 77/* Number of exchanges reserved for discovery to complete */
80#define LPFC_DISC_IOCB_BUFF_COUNT 20 78#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@ struct lpfc_hba {
722 uint32_t cfg_hba_queue_depth; 720 uint32_t cfg_hba_queue_depth;
723 uint32_t cfg_enable_hba_reset; 721 uint32_t cfg_enable_hba_reset;
724 uint32_t cfg_enable_hba_heartbeat; 722 uint32_t cfg_enable_hba_heartbeat;
723 uint32_t cfg_fof;
724 uint32_t cfg_EnableXLane;
725 uint8_t cfg_oas_tgt_wwpn[8];
726 uint8_t cfg_oas_vpt_wwpn[8];
727 uint32_t cfg_oas_lun_state;
728#define OAS_LUN_ENABLE 1
729#define OAS_LUN_DISABLE 0
730 uint32_t cfg_oas_lun_status;
731#define OAS_LUN_STATUS_EXISTS 0x01
732 uint32_t cfg_oas_flags;
733#define OAS_FIND_ANY_VPORT 0x01
734#define OAS_FIND_ANY_TARGET 0x02
735#define OAS_LUN_VALID 0x04
736 uint32_t cfg_XLanePriority;
725 uint32_t cfg_enable_bg; 737 uint32_t cfg_enable_bg;
726 uint32_t cfg_hostmem_hgp; 738 uint32_t cfg_hostmem_hgp;
727 uint32_t cfg_log_verbose; 739 uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@ struct lpfc_hba {
730 uint32_t cfg_request_firmware_upgrade; 742 uint32_t cfg_request_firmware_upgrade;
731 uint32_t cfg_iocb_cnt; 743 uint32_t cfg_iocb_cnt;
732 uint32_t cfg_suppress_link_up; 744 uint32_t cfg_suppress_link_up;
745 uint32_t cfg_rrq_xri_bitmap_sz;
733#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ 746#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
734#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ 747#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
735#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ 748#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
@@ -835,6 +848,7 @@ struct lpfc_hba {
835 mempool_t *mbox_mem_pool; 848 mempool_t *mbox_mem_pool;
836 mempool_t *nlp_mem_pool; 849 mempool_t *nlp_mem_pool;
837 mempool_t *rrq_pool; 850 mempool_t *rrq_pool;
851 mempool_t *active_rrq_pool;
838 852
839 struct fc_host_statistics link_stats; 853 struct fc_host_statistics link_stats;
840 enum intr_type_t intr_type; 854 enum intr_type_t intr_type;
@@ -869,7 +883,6 @@ struct lpfc_hba {
869 atomic_t num_cmd_success; 883 atomic_t num_cmd_success;
870 unsigned long last_rsrc_error_time; 884 unsigned long last_rsrc_error_time;
871 unsigned long last_ramp_down_time; 885 unsigned long last_ramp_down_time;
872 unsigned long last_ramp_up_time;
873#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 886#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
874 struct dentry *hba_debugfs_root; 887 struct dentry *hba_debugfs_root;
875 atomic_t debugfs_vport_count; 888 atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@ struct lpfc_hba {
971 atomic_t sdev_cnt; 984 atomic_t sdev_cnt;
972 uint8_t fips_spec_rev; 985 uint8_t fips_spec_rev;
973 uint8_t fips_level; 986 uint8_t fips_level;
987 spinlock_t devicelock; /* lock for luns list */
988 mempool_t *device_data_mem_pool;
989 struct list_head luns;
974}; 990};
975 991
976static inline struct Scsi_Host * 992static inline struct Scsi_Host *
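
The new lpfc_hba fields carry the Optimized Access Storage (OAS) configuration: target and vport WWPNs, a lun state/status pair, and the small cfg_oas_flags bitmask. As the sysfs store routines later in this diff show, writing an all-zero wwpn turns on the corresponding OAS_FIND_ANY_* wildcard bit, and any wwpn change clears OAS_LUN_VALID to restart the lun walk. A small sketch of that flag bookkeeping (flag values are the ones defined above; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OAS_FIND_ANY_VPORT  0x01
#define OAS_FIND_ANY_TARGET 0x02
#define OAS_LUN_VALID       0x04

static void toy_set_tgt_wwpn(uint32_t *flags, uint8_t tgt[8],
			     const uint8_t wwpn[8])
{
	static const uint8_t zero[8];

	memcpy(tgt, wwpn, 8);
	if (!memcmp(wwpn, zero, 8))
		*flags |= OAS_FIND_ANY_TARGET;	/* zero wwpn == wildcard */
	else
		*flags &= ~OAS_FIND_ANY_TARGET;
	*flags &= ~OAS_LUN_VALID;		/* force a fresh lun walk */
}

int main(void)
{
	uint32_t flags = 0;
	uint8_t tgt[8];
	const uint8_t any[8] = { 0 };

	toy_set_tgt_wwpn(&flags, tgt, any);
	printf("flags=0x%x\n", flags);		/* 0x2: match any target */
	return 0;
}
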
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc92b93..8d5b6ceec9c9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
529} 529}
530 530
531/** 531/**
532 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
533 * (OAS) is supported.
 534 * @dev: class device that is converted into a Scsi_host.
 535 * @attr: device attribute, not used.
 536 * @buf: on return contains the formatted support indication.
537 *
538 * Returns: size of formatted string.
539 **/
540static ssize_t
541lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
542 char *buf)
543{
544 struct Scsi_Host *shost = class_to_shost(dev);
545 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
546 struct lpfc_hba *phba = vport->phba;
547
548 return snprintf(buf, PAGE_SIZE, "%d\n",
549 phba->sli4_hba.pc_sli4_params.oas_supported);
550}
551
552/**
532 * lpfc_link_state_store - Transition the link_state on an HBA port 553 * lpfc_link_state_store - Transition the link_state on an HBA port
533 * @dev: class device that is converted into a Scsi_host. 554 * @dev: class device that is converted into a Scsi_host.
534 * @attr: device attribute, not used. 555 * @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
2041static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, 2062static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
2042 lpfc_sriov_hw_max_virtfn_show, NULL); 2063 lpfc_sriov_hw_max_virtfn_show, NULL);
2043static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); 2064static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2065static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2066 NULL);
2044 2067
2045static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 2068static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2069#define WWN_SZ 8
2070/**
2071 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2072 * @buf: WWN string.
2073 * @cnt: Length of string.
2074 * @wwn: Array to receive converted wwn value.
2075 *
2076 * Returns:
2077 * -EINVAL if the buffer does not contain a valid wwn
2078 * 0 success
2079 **/
2080static size_t
2081lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2082{
2083 unsigned int i, j;
2084
2085 /* Count may include a LF at end of string */
2086 if (buf[cnt-1] == '\n')
2087 cnt--;
2046 2088
2089 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2090 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2091 return -EINVAL;
2092
2093 memset(wwn, 0, WWN_SZ);
2094
2095 /* Validate and store the new name */
2096 for (i = 0, j = 0; i < 16; i++) {
2097 if ((*buf >= 'a') && (*buf <= 'f'))
2098 j = ((j << 4) | ((*buf++ - 'a') + 10));
2099 else if ((*buf >= 'A') && (*buf <= 'F'))
2100 j = ((j << 4) | ((*buf++ - 'A') + 10));
2101 else if ((*buf >= '0') && (*buf <= '9'))
2102 j = ((j << 4) | (*buf++ - '0'));
2103 else
2104 return -EINVAL;
2105 if (i % 2) {
2106 wwn[i/2] = j & 0xff;
2107 j = 0;
2108 }
2109 }
2110 return 0;
2111}
2047/** 2112/**
2048 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid 2113 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2049 * @dev: class device that is converted into a Scsi_host. 2114 * @dev: class device that is converted into a Scsi_host.
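
lpfc_wwn_set() above accepts exactly 16 hex digits, optionally prefixed with "x" or "0x" and optionally ending in a newline, and packs them into an 8-byte WWN. (Note the committed helper is declared size_t yet returns -EINVAL; the callers below assign the result to an int, so it behaves as a signed errno in practice.) A standalone rebuild of the same parsing rules for experimenting outside the kernel:

#include <stdio.h>
#include <string.h>

static int wwn_parse(const char *buf, size_t cnt, unsigned char wwn[8])
{
	unsigned int i, j = 0;

	if (cnt && buf[cnt - 1] == '\n')	/* count may include a LF */
		cnt--;
	if (cnt < 16 || cnt > 18 ||
	    (cnt == 17 && *buf++ != 'x') ||
	    (cnt == 18 && (*buf++ != '0' || *buf++ != 'x')))
		return -1;

	memset(wwn, 0, 8);
	for (i = 0; i < 16; i++) {
		char c = *buf++;

		if (c >= 'a' && c <= 'f')
			j = (j << 4) | (c - 'a' + 10);
		else if (c >= 'A' && c <= 'F')
			j = (j << 4) | (c - 'A' + 10);
		else if (c >= '0' && c <= '9')
			j = (j << 4) | (c - '0');
		else
			return -1;
		if (i % 2) {			/* two nibbles per byte */
			wwn[i / 2] = j & 0xff;
			j = 0;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char wwn[8];

	if (!wwn_parse("0x10000000c9abcdef\n", 19, wwn))
		printf("%02x:%02x:...:%02x\n", wwn[0], wwn[1], wwn[7]);
	return 0;
}
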
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2132 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2197 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2133 struct lpfc_hba *phba = vport->phba; 2198 struct lpfc_hba *phba = vport->phba;
2134 struct completion online_compl; 2199 struct completion online_compl;
2135 int stat1=0, stat2=0; 2200 int stat1 = 0, stat2 = 0;
2136 unsigned int i, j, cnt=count; 2201 unsigned int cnt = count;
2137 u8 wwpn[8]; 2202 u8 wwpn[WWN_SZ];
2138 int rc; 2203 int rc;
2139 2204
2140 if (!phba->cfg_enable_hba_reset) 2205 if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2149 if (buf[cnt-1] == '\n') 2214 if (buf[cnt-1] == '\n')
2150 cnt--; 2215 cnt--;
2151 2216
2152 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || 2217 if (!phba->soft_wwn_enable)
2153 ((cnt == 17) && (*buf++ != 'x')) ||
2154 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2155 return -EINVAL; 2218 return -EINVAL;
2156 2219
2220 /* lock setting wwpn, wwnn down */
2157 phba->soft_wwn_enable = 0; 2221 phba->soft_wwn_enable = 0;
2158 2222
2159 memset(wwpn, 0, sizeof(wwpn)); 2223 rc = lpfc_wwn_set(buf, cnt, wwpn);
2160 2224 if (!rc) {
2161 /* Validate and store the new name */ 2225 /* not able to set wwpn, unlock it */
2162 for (i=0, j=0; i < 16; i++) { 2226 phba->soft_wwn_enable = 1;
2163 int value; 2227 return rc;
2164
2165 value = hex_to_bin(*buf++);
2166 if (value >= 0)
2167 j = (j << 4) | value;
2168 else
2169 return -EINVAL;
2170 if (i % 2) {
2171 wwpn[i/2] = j & 0xff;
2172 j = 0;
2173 }
2174 } 2228 }
2229
2175 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 2230 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2176 fc_host_port_name(shost) = phba->cfg_soft_wwpn; 2231 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2177 if (phba->cfg_soft_wwnn) 2232 if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2198 "reinit adapter - %d\n", stat2); 2253 "reinit adapter - %d\n", stat2);
2199 return (stat1 || stat2) ? -EIO : count; 2254 return (stat1 || stat2) ? -EIO : count;
2200} 2255}
2201static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ 2256static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
2202 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); 2257 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
2203 2258
2204/** 2259/**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2235{ 2290{
2236 struct Scsi_Host *shost = class_to_shost(dev); 2291 struct Scsi_Host *shost = class_to_shost(dev);
2237 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2292 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2238 unsigned int i, j, cnt=count; 2293 unsigned int cnt = count;
2239 u8 wwnn[8]; 2294 u8 wwnn[WWN_SZ];
2295 int rc;
2240 2296
2241 /* count may include a LF at end of string */ 2297 /* count may include a LF at end of string */
2242 if (buf[cnt-1] == '\n') 2298 if (buf[cnt-1] == '\n')
2243 cnt--; 2299 cnt--;
2244 2300
2245 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || 2301 if (!phba->soft_wwn_enable)
2246 ((cnt == 17) && (*buf++ != 'x')) ||
2247 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2248 return -EINVAL; 2302 return -EINVAL;
2249 2303
2250 /* 2304 rc = lpfc_wwn_set(buf, cnt, wwnn);
2251 * Allow wwnn to be set many times, as long as the enable is set. 2305 if (!rc) {
2252 * However, once the wwpn is set, everything locks. 2306 /* Allow wwnn to be set many times, as long as the enable
2253 */ 2307 * is set. However, once the wwpn is set, everything locks.
2254 2308 */
2255 memset(wwnn, 0, sizeof(wwnn)); 2309 return rc;
2256
2257 /* Validate and store the new name */
2258 for (i=0, j=0; i < 16; i++) {
2259 int value;
2260
2261 value = hex_to_bin(*buf++);
2262 if (value >= 0)
2263 j = (j << 4) | value;
2264 else
2265 return -EINVAL;
2266 if (i % 2) {
2267 wwnn[i/2] = j & 0xff;
2268 j = 0;
2269 }
2270 } 2310 }
2311
2271 phba->cfg_soft_wwnn = wwn_to_u64(wwnn); 2312 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2272 2313
2273 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 2314 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2276 2317
2277 return count; 2318 return count;
2278} 2319}
2279static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ 2320static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
2280 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); 2321 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
2281 2322
2323/**
2324 * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
2325 * Optimized Access Storage (OAS) operations.
2326 * @dev: class device that is converted into a Scsi_host.
2327 * @attr: device attribute, not used.
2328 * @buf: buffer for passing information.
2329 *
2330 * Returns:
2331 * size of formatted string.
2332 **/
2333static ssize_t
2334lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2335 char *buf)
2336{
2337 struct Scsi_Host *shost = class_to_shost(dev);
2338 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2339
2340 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
2341 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2342}
2343
2344/**
2345 * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
2346 * Optimized Access Storage (OAS) operations.
2347 * @dev: class device that is converted into a Scsi_host.
2348 * @attr: device attribute, not used.
2349 * @buf: buffer for passing information.
2350 * @count: Size of the data buffer.
2351 *
2352 * Returns:
2353 * -EINVAL count is invalid or the wwpn contains an invalid byte
2354 * -EPERM oas is not supported by hba
2355 * value of count on success
2356 **/
2357static ssize_t
2358lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2359 const char *buf, size_t count)
2360{
2361 struct Scsi_Host *shost = class_to_shost(dev);
2362 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2363 unsigned int cnt = count;
2364 uint8_t wwpn[WWN_SZ];
2365 int rc;
2366
2367 if (!phba->cfg_EnableXLane)
2368 return -EPERM;
2369
2370 /* count may include a LF at end of string */
2371 if (buf[cnt-1] == '\n')
2372 cnt--;
2373
2374 rc = lpfc_wwn_set(buf, cnt, wwpn);
2375 if (rc)
2376 return rc;
2377
2378 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2379 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2380 if (wwn_to_u64(wwpn) == 0)
2381 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2382 else
2383 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2384 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2385 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2386 return count;
2387}
2388static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2389 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
2390
2391/**
2392 * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
2393 * for Optimized Access Storage (OAS) operations.
2394 * @dev: class device that is converted into a Scsi_host.
2395 * @attr: device attribute, not used.
2396 * @buf: buffer for passing information.
2397 *
2398 * Returns:
2399 * size of formatted string.
2400 **/
2401static ssize_t
2402lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
2403 char *buf)
2404{
2405 struct Scsi_Host *shost = class_to_shost(dev);
2406 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2407
2408 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
2409 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
2410}
2411
2412/**
2413 * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
2414 * for Optimized Access Storage (OAS) operations.
2415 * @dev: class device that is converted into a Scsi_host.
2416 * @attr: device attribute, not used.
2417 * @buf: buffer for passing information.
2418 * @count: Size of the data buffer.
2419 *
2420 * Returns:
2421 * -EINVAL count is invalid or the wwpn contains an invalid byte
2422 * -EPERM oas is not supported by hba
2423 * value of count on success
2424 **/
2425static ssize_t
2426lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
2427 const char *buf, size_t count)
2428{
2429 struct Scsi_Host *shost = class_to_shost(dev);
2430 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2431 unsigned int cnt = count;
2432 uint8_t wwpn[WWN_SZ];
2433 int rc;
2434
2435 if (!phba->cfg_EnableXLane)
2436 return -EPERM;
2437
2438 /* count may include a LF at end of string */
2439 if (buf[cnt-1] == '\n')
2440 cnt--;
2441
2442 rc = lpfc_wwn_set(buf, cnt, wwpn);
2443 if (rc)
2444 return rc;
2445
2446 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2447 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2448 if (wwn_to_u64(wwpn) == 0)
2449 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
2450 else
2451 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
2452 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2453 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2454 return count;
2455}
2456static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
2457 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
2458
2459/**
2460 * lpfc_oas_lun_state_show - Return the state (enabled or disabled) that
2461 * will be applied to subsequently referenced
2462 * luns for Optimized Access Storage (OAS) operations.
2463 * @dev: class device that is converted into a Scsi_host.
2464 * @attr: device attribute, not used.
2465 * @buf: buffer for passing information.
2466 *
2467 * Returns:
2468 * size of formatted string.
2469 **/
2470static ssize_t
2471lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
2472 char *buf)
2473{
2474 struct Scsi_Host *shost = class_to_shost(dev);
2475 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2476
2477 return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
2478}
2479
2480/**
2481 * lpfc_oas_lun_state_store - Store the state (enabled or disabled) to be
2482 * applied to subsequently referenced luns
2483 * for Optimized Access Storage (OAS) operations.
2484 * @dev: class device that is converted into a Scsi_host.
2485 * @attr: device attribute, not used.
2486 * @buf: buffer for passing information.
2487 * @count: Size of the data buffer.
2488 *
2489 * Returns:
2490 * -EINVAL count is invalid or the value is not 0 or 1
2491 * -EPERM oas is not supported by hba
2492 * value of count on success
2493 **/
2494static ssize_t
2495lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
2496 const char *buf, size_t count)
2497{
2498 struct Scsi_Host *shost = class_to_shost(dev);
2499 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2500 int val = 0;
2501
2502 if (!phba->cfg_EnableXLane)
2503 return -EPERM;
2504
2505 if (!isdigit(buf[0]))
2506 return -EINVAL;
2507
2508 if (sscanf(buf, "%i", &val) != 1)
2509 return -EINVAL;
2510
2511 if ((val != 0) && (val != 1))
2512 return -EINVAL;
2513
2514 phba->cfg_oas_lun_state = val;
2515
2516 return strlen(buf);
2517}
2518static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
2519 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
2520
2521/**
2522 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
2523 * Storage (OAS) lun returned by the
2524 * lpfc_oas_lun_show function.
2525 * @dev: class device that is converted into a Scsi_host.
2526 * @attr: device attribute, not used.
2527 * @buf: buffer for passing information.
2528 *
2529 * Returns:
2530 * size of formatted string.
2531 **/
2532static ssize_t
2533lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
2534 char *buf)
2535{
2536 struct Scsi_Host *shost = class_to_shost(dev);
2537 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2538
2539 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
2540 return -EFAULT;
2541
2542 return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
2543}
2544static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
2545 lpfc_oas_lun_status_show, NULL);
2546
2547
2548/**
2549 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
2550 * (OAS) operations.
2551 * @phba: lpfc_hba pointer.
2552 * @vpt_wwpn: vport wwpn by reference; @tgt_wwpn: target wwpn by reference.
2553 * @lun: the fc lun for setting oas state.
2554 * @oas_state: the oas state to be set to the lun.
2555 *
2556 * Returns:
2557 * SUCCESS : 0
2558 * -ENOMEM failed to enable the lun
2559 * -EPERM OAS is not enabled or not supported by this port.
2559 *
2560 */
2561static size_t
2562lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2563 uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
2564{
2565
2566 int rc = 0;
2567
2568 if (!phba->cfg_EnableXLane)
2569 return -EPERM;
2570
2571 if (oas_state) {
2572 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
2573 (struct lpfc_name *)tgt_wwpn, lun))
2574 rc = -ENOMEM;
2575 } else {
2576 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
2577 (struct lpfc_name *)tgt_wwpn, lun);
2578 }
2579 return rc;
2580
2581}
2582
2583/**
2584 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
2585 * Access Storage (OAS) operations.
2586 * @phba: lpfc_hba pointer.
2587 * @vpt_wwpn: wwpn of the vport associated with the returned lun
2588 * @tgt_wwpn: wwpn of the target associated with the returned lun
2589 * @lun_status: status of the lun returned lun
2590 *
2591 * Returns the first or next lun enabled for OAS operations for the vport/target
2592 * specified. If a lun is found, its vport wwpn, target wwpn and status are
2593 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
2594 *
2595 * Return:
2596 * lun that is OAS enabled for the vport/target
2597 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
2598 */
2599static uint64_t
2600lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2601 uint8_t tgt_wwpn[], uint32_t *lun_status)
2602{
2603 uint64_t found_lun;
2604
2605 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
2606 return NOT_OAS_ENABLED_LUN;
2607 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
2608 phba->sli4_hba.oas_next_vpt_wwpn,
2609 (struct lpfc_name *)
2610 phba->sli4_hba.oas_next_tgt_wwpn,
2611 &phba->sli4_hba.oas_next_lun,
2612 (struct lpfc_name *)vpt_wwpn,
2613 (struct lpfc_name *)tgt_wwpn,
2614 &found_lun, lun_status))
2615 return found_lun;
2616 else
2617 return NOT_OAS_ENABLED_LUN;
2618}
2619
2620/**
2621 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
2622 * @phba: lpfc_hba pointer.
2623 * @vpt_wwpn: vport wwpn by reference.
2624 * @tgt_wwpn: target wwpn by reference.
2625 * @lun: the fc lun for setting oas state.
2626 * @oas_state: the oas state to be set to the oas_lun.
2627 *
2628 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
2629 * a lun for OAS operations.
2630 *
2631 * Return:
2632 * SUCCESS: 0
2633 * -ENOMEM: failed to enable a lun for OAS operations
2634 * -EPERM: OAS is not enabled
2635 */
2636static ssize_t
2637lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2638 uint8_t tgt_wwpn[], uint64_t lun,
2639 uint32_t oas_state)
2640{
2641
2642 int rc;
2643
2644 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
2645 oas_state);
2646 return rc;
2647}
2648
2649/**
2650 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
2651 * @dev: class device that is converted into a Scsi_host.
2652 * @attr: device attribute, not used.
2653 * @buf: buffer for passing information.
2654 *
2655 * This routine returns a lun enabled for OAS each time the function
2656 * is called.
2657 *
2658 * Returns:
2659 * SUCCESS: size of formatted string.
2660 * -EFAULT: target or vport wwpn was not set properly.
2661 * -EPERM: oas is not enabled.
2662 **/
2663static ssize_t
2664lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
2665 char *buf)
2666{
2667 struct Scsi_Host *shost = class_to_shost(dev);
2668 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2669
2670 uint64_t oas_lun;
2671 int len = 0;
2672
2673 if (!phba->cfg_EnableXLane)
2674 return -EPERM;
2675
2676 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
2677 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
2678 return -EFAULT;
2679
2680 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
2681 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
2682 return -EFAULT;
2683
2684 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
2685 phba->cfg_oas_tgt_wwpn,
2686 &phba->cfg_oas_lun_status);
2687 if (oas_lun != NOT_OAS_ENABLED_LUN)
2688 phba->cfg_oas_flags |= OAS_LUN_VALID;
2689
2690 len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
2691
2692 return len;
2693}
2694
2695/**
2696 * lpfc_oas_lun_store - Sets the OAS state for lun
2697 * @dev: class device that is converted into a Scsi_host.
2698 * @attr: device attribute, not used.
2699 * @buf: buffer for passing information.
2700 *
2701 * This function sets the OAS state for the lun. Before this function is called,
2702 * the vport wwpn, target wwpn, and oas state need to be set.
2703 *
2704 * Returns:
2705 * SUCCESS: value of count.
2706 * -EFAULT: target or vport wwpn was not set properly.
2707 * -EPERM: oas is not enabled.
2709 **/
2710static ssize_t
2711lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
2712 const char *buf, size_t count)
2713{
2714 struct Scsi_Host *shost = class_to_shost(dev);
2715 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2716 uint64_t scsi_lun;
2717 ssize_t rc;
2718
2719 if (!phba->cfg_EnableXLane)
2720 return -EPERM;
2721
2722 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
2723 return -EFAULT;
2724
2725 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
2726 return -EFAULT;
2727
2728 if (!isdigit(buf[0]))
2729 return -EINVAL;
2730
2731 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
2732 return -EINVAL;
2733
2734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2735 "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
2736 "with oas set to %d\n",
2737 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
2738 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
2739 phba->cfg_oas_lun_state);
2740
2741 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
2742 phba->cfg_oas_tgt_wwpn, scsi_lun,
2743 phba->cfg_oas_lun_state);
2744
2745 if (rc)
2746 return rc;
2747
2748 return count;
2749}
2750static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
2751 lpfc_oas_lun_show, lpfc_oas_lun_store);
2282 2752
2283static int lpfc_poll = 0; 2753static int lpfc_poll = 0;
2284module_param(lpfc_poll, int, S_IRUGO); 2754module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
3818 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; 4288 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
3819 struct lpfc_hba *phba = vport->phba; 4289 struct lpfc_hba *phba = vport->phba;
3820 struct lpfc_vector_map_info *cpup; 4290 struct lpfc_vector_map_info *cpup;
3821 int idx, len = 0; 4291 int len = 0;
3822 4292
3823 if ((phba->sli_rev != LPFC_SLI_REV4) || 4293 if ((phba->sli_rev != LPFC_SLI_REV4) ||
3824 (phba->intr_type != MSIX)) 4294 (phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
3846 break; 4316 break;
3847 } 4317 }
3848 4318
3849 cpup = phba->sli4_hba.cpu_map; 4319 while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
3850 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { 4320 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
4321
4322 /* margin should fit in this and the truncated message */
3851 if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) 4323 if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
3852 len += snprintf(buf + len, PAGE_SIZE-len, 4324 len += snprintf(buf + len, PAGE_SIZE-len,
3853 "CPU %02d io_chan %02d " 4325 "CPU %02d io_chan %02d "
3854 "physid %d coreid %d\n", 4326 "physid %d coreid %d\n",
3855 idx, cpup->channel_id, cpup->phys_id, 4327 phba->sli4_hba.curr_disp_cpu,
4328 cpup->channel_id, cpup->phys_id,
3856 cpup->core_id); 4329 cpup->core_id);
3857 else 4330 else
3858 len += snprintf(buf + len, PAGE_SIZE-len, 4331 len += snprintf(buf + len, PAGE_SIZE-len,
3859 "CPU %02d io_chan %02d " 4332 "CPU %02d io_chan %02d "
3860 "physid %d coreid %d IRQ %d\n", 4333 "physid %d coreid %d IRQ %d\n",
3861 idx, cpup->channel_id, cpup->phys_id, 4334 phba->sli4_hba.curr_disp_cpu,
4335 cpup->channel_id, cpup->phys_id,
3862 cpup->core_id, cpup->irq); 4336 cpup->core_id, cpup->irq);
3863 4337
3864 cpup++; 4338 phba->sli4_hba.curr_disp_cpu++;
4339
4340 /* display max number of CPUs keeping some margin */
4341 if (phba->sli4_hba.curr_disp_cpu <
4342 phba->sli4_hba.num_present_cpu &&
4343 (len >= (PAGE_SIZE - 64))) {
4344 len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
4345 break;
4346 }
3865 } 4347 }
4348
4349 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
4350 phba->sli4_hba.curr_disp_cpu = 0;
4351
3866 return len; 4352 return len;
3867} 4353}
3868 4354
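
The rewritten lpfc_fcp_cpu_map_show() makes the CPU-map dump resumable instead of assuming it fits in one page: a cursor (curr_disp_cpu) persists across reads, output stops while a 64-byte margin still remains, a "more..." marker tells the user to read again, and the cursor resets once the walk completes. The pattern in miniature, as a userspace model with a shrunken page so the continuation is easy to trigger (names and sizes illustrative):

#include <stdio.h>

#define TOY_PAGE_SIZE 128
#define MARGIN        64
#define NUM_CPU       16

static int curr_disp_cpu;	/* persists across "reads" */

static int toy_cpu_map_show(char *buf)
{
	int len = 0;

	while (curr_disp_cpu < NUM_CPU) {
		len += snprintf(buf + len, TOY_PAGE_SIZE - len,
				"CPU %02d io_chan %02d\n",
				curr_disp_cpu, curr_disp_cpu % 4);
		curr_disp_cpu++;
		/* stop while the margin still fits a continuation marker */
		if (curr_disp_cpu < NUM_CPU &&
		    len >= TOY_PAGE_SIZE - MARGIN) {
			len += snprintf(buf + len, TOY_PAGE_SIZE - len,
					"more...\n");
			break;
		}
	}
	if (curr_disp_cpu == NUM_CPU)
		curr_disp_cpu = 0;	/* next read starts over */
	return len;
}

int main(void)
{
	char page[TOY_PAGE_SIZE];
	int len;

	do {
		len = toy_cpu_map_show(page);
		fwrite(page, 1, len, stdout);	/* one "cat" per iteration */
	} while (curr_disp_cpu != 0);		/* read until the walk wraps */
	return 0;
}
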
@@ -4157,6 +4643,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
4157LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); 4643LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
4158 4644
4159/* 4645/*
4646# lpfc_EnableXLane: Enable Express Lane Feature
4647# 0x0 Express Lane Feature disabled
4648# 0x1 Express Lane Feature enabled
4649# Value range is [0,1]. Default value is 0.
4650*/
4651LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
4652
4653/*
4654# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
4655# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
4656# Value range is [0x0,0x7f]. Default value is 0
4657*/
4658LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
4659
4660/*
4160# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 4661# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
4161# 0 = BlockGuard disabled (default) 4662# 0 = BlockGuard disabled (default)
4162# 1 = BlockGuard enabled 4663# 1 = BlockGuard enabled
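
lpfc_EnableXLane and lpfc_XLanePriority are declared with LPFC_ATTR_R, the driver's macro for a read-only, range-checked module parameter; per the driver's usual pattern this expands to a module parameter plus an _init helper that clamps out-of-range values back to the default, which lpfc_get_cfgparam() then calls (see the last lpfc_attr.c hunk below). A hedged reconstruction of that shape, not the macro's literal output:

#include <stdio.h>

static int lpfc_EnableXLane;	/* stands in for module_param(..., int, ...) */

struct toy_hba { unsigned int cfg_EnableXLane; };

static int toy_EnableXLane_init(struct toy_hba *phba, int val)
{
	if (val >= 0 && val <= 1) {	/* [min, max] from the macro args */
		phba->cfg_EnableXLane = val;
		return 0;
	}
	fprintf(stderr, "lpfc: bad EnableXLane %d, using default 0\n", val);
	phba->cfg_EnableXLane = 0;	/* fall back to the default */
	return -1;
}

int main(void)
{
	struct toy_hba hba;

	toy_EnableXLane_init(&hba, lpfc_EnableXLane);
	printf("cfg_EnableXLane=%u\n", hba.cfg_EnableXLane);
	return 0;
}
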
@@ -4317,6 +4818,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
4317 &dev_attr_lpfc_soft_wwn_enable, 4818 &dev_attr_lpfc_soft_wwn_enable,
4318 &dev_attr_lpfc_enable_hba_reset, 4819 &dev_attr_lpfc_enable_hba_reset,
4319 &dev_attr_lpfc_enable_hba_heartbeat, 4820 &dev_attr_lpfc_enable_hba_heartbeat,
4821 &dev_attr_lpfc_EnableXLane,
4822 &dev_attr_lpfc_XLanePriority,
4823 &dev_attr_lpfc_xlane_lun,
4824 &dev_attr_lpfc_xlane_tgt,
4825 &dev_attr_lpfc_xlane_vpt,
4826 &dev_attr_lpfc_xlane_lun_state,
4827 &dev_attr_lpfc_xlane_lun_status,
4320 &dev_attr_lpfc_sg_seg_cnt, 4828 &dev_attr_lpfc_sg_seg_cnt,
4321 &dev_attr_lpfc_max_scsicmpl_time, 4829 &dev_attr_lpfc_max_scsicmpl_time,
4322 &dev_attr_lpfc_stat_data_ctrl, 4830 &dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4335 &dev_attr_lpfc_dss, 4843 &dev_attr_lpfc_dss,
4336 &dev_attr_lpfc_sriov_hw_max_virtfn, 4844 &dev_attr_lpfc_sriov_hw_max_virtfn,
4337 &dev_attr_protocol, 4845 &dev_attr_protocol,
4846 &dev_attr_lpfc_xlane_supported,
4338 NULL, 4847 NULL,
4339}; 4848};
4340 4849
@@ -5296,11 +5805,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5296 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 5805 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
5297 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 5806 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
5298 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 5807 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
5808 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
5809 if (phba->sli_rev != LPFC_SLI_REV4)
5810 phba->cfg_EnableXLane = 0;
5811 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
5812 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
5813 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
5814 phba->cfg_oas_lun_state = 0;
5815 phba->cfg_oas_lun_status = 0;
5816 phba->cfg_oas_flags = 0;
5299 lpfc_enable_bg_init(phba, lpfc_enable_bg); 5817 lpfc_enable_bg_init(phba, lpfc_enable_bg);
5300 if (phba->sli_rev == LPFC_SLI_REV4) 5818 if (phba->sli_rev == LPFC_SLI_REV4)
5301 phba->cfg_poll = 0; 5819 phba->cfg_poll = 0;
5302 else 5820 else
5303 phba->cfg_poll = lpfc_poll; 5821 phba->cfg_poll = lpfc_poll;
5304 phba->cfg_soft_wwnn = 0L; 5822 phba->cfg_soft_wwnn = 0L;
5305 phba->cfg_soft_wwpn = 0L; 5823 phba->cfg_soft_wwpn = 0L;
5306 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 5824 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20e2d8..ca2f4ea7cdef 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4153 if (subsys == SLI_CONFIG_SUBSYS_FCOE) { 4153 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4154 switch (opcode) { 4154 switch (opcode) {
4155 case FCOE_OPCODE_READ_FCF: 4155 case FCOE_OPCODE_READ_FCF:
4156 case FCOE_OPCODE_GET_DPORT_RESULTS:
4156 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4157 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4157 "2957 Handled SLI_CONFIG " 4158 "2957 Handled SLI_CONFIG "
4158 "subsys_fcoe, opcode:x%x\n", 4159 "subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4161 nemb_mse, dmabuf); 4162 nemb_mse, dmabuf);
4162 break; 4163 break;
4163 case FCOE_OPCODE_ADD_FCF: 4164 case FCOE_OPCODE_ADD_FCF:
4165 case FCOE_OPCODE_SET_DPORT_MODE:
4166 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4164 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4167 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4165 "2958 Handled SLI_CONFIG " 4168 "2958 Handled SLI_CONFIG "
4166 "subsys_fcoe, opcode:x%x\n", 4169 "subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a160d1..a94d4c9dfaa5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys {
231#define SLI_CONFIG_SUBSYS_FCOE 0x0C 231#define SLI_CONFIG_SUBSYS_FCOE 0x0C
232#define FCOE_OPCODE_READ_FCF 0x08 232#define FCOE_OPCODE_READ_FCF 0x08
233#define FCOE_OPCODE_ADD_FCF 0x09 233#define FCOE_OPCODE_ADD_FCF 0x09
234#define FCOE_OPCODE_SET_DPORT_MODE 0x27
235#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28
234}; 236};
235 237
236struct lpfc_sli_config_emb1_subsys { 238struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a84239..adda0bf7a244 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
187void lpfc_offline(struct lpfc_hba *); 187void lpfc_offline(struct lpfc_hba *);
188void lpfc_reset_hba(struct lpfc_hba *); 188void lpfc_reset_hba(struct lpfc_hba *);
189 189
190int lpfc_fof_queue_create(struct lpfc_hba *);
191int lpfc_fof_queue_setup(struct lpfc_hba *);
192int lpfc_fof_queue_destroy(struct lpfc_hba *);
193irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
194
190int lpfc_sli_setup(struct lpfc_hba *); 195int lpfc_sli_setup(struct lpfc_hba *);
191int lpfc_sli_queue_setup(struct lpfc_hba *); 196int lpfc_sli_queue_setup(struct lpfc_hba *);
192 197
@@ -242,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
242void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); 247void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
243 248
244int lpfc_mem_alloc(struct lpfc_hba *, int align); 249int lpfc_mem_alloc(struct lpfc_hba *, int align);
250int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
245void lpfc_mem_free(struct lpfc_hba *); 251void lpfc_mem_free(struct lpfc_hba *);
246void lpfc_mem_free_all(struct lpfc_hba *); 252void lpfc_mem_free_all(struct lpfc_hba *);
247void lpfc_stop_vport_timers(struct lpfc_vport *); 253void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@ void lpfc_fabric_block_timeout(unsigned long);
399void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); 405void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
400void lpfc_rampdown_queue_depth(struct lpfc_hba *); 406void lpfc_rampdown_queue_depth(struct lpfc_hba *);
401void lpfc_ramp_down_queue_handler(struct lpfc_hba *); 407void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
402void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
403void lpfc_scsi_dev_block(struct lpfc_hba *); 408void lpfc_scsi_dev_block(struct lpfc_hba *);
404 409
405void 410void
@@ -471,3 +476,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
471uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); 476uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
472int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); 477int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
473void lpfc_sli4_offline_eratt(struct lpfc_hba *); 478void lpfc_sli4_offline_eratt(struct lpfc_hba *);
479
480struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
481 struct lpfc_name *,
482 struct lpfc_name *,
483 uint64_t, bool);
484void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
485struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
486 struct list_head *list,
487 struct lpfc_name *,
488 struct lpfc_name *, uint64_t);
489bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
490 struct lpfc_name *, uint64_t);
491bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
492 struct lpfc_name *, uint64_t);
493bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
494 struct lpfc_name *, uint64_t *, struct lpfc_name *,
495 struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc952ca6..828c08e9389e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
2280 } 2280 }
2281 } 2281 }
2282 2282
2283 if (phba->cfg_fof) {
2284 /* FOF EQ */
2285 qp = phba->sli4_hba.fof_eq;
2286 if (!qp)
2287 goto out;
2288
2289 len += snprintf(pbuffer+len,
2290 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2291 "\nFOF EQ info: "
2292 "EQ-STAT[max:x%x noE:x%x "
2293 "bs:x%x proc:x%llx]\n",
2294 qp->q_cnt_1, qp->q_cnt_2,
2295 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2296
2297 len += snprintf(pbuffer+len,
2298 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2299 "EQID[%02d], "
2300 "QE-CNT[%04d], QE-SIZE[%04d], "
2301 "HOST-IDX[%04d], PORT-IDX[%04d]",
2302 qp->queue_id,
2303 qp->entry_count,
2304 qp->entry_size,
2305 qp->host_index,
2306 qp->hba_index);
2307
2308 /* Reset max counter */
2309 qp->EQ_max_eqe = 0;
2310
2311 len += snprintf(pbuffer+len,
2312 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2313 if (len >= max_cnt)
2314 goto too_big;
2315 }
2316
2317 if (phba->cfg_EnableXLane) {
2318
2319 /* OAS CQ */
2320 qp = phba->sli4_hba.oas_cq;
2321 if (qp) {
2322 len += snprintf(pbuffer+len,
2323 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2324 "\tOAS CQ info: ");
2325 len += snprintf(pbuffer+len,
2326 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2327 "AssocEQID[%02d]: "
2328 "CQ STAT[max:x%x relw:x%x "
2329 "xabt:x%x wq:x%llx]\n",
2330 qp->assoc_qid,
2331 qp->q_cnt_1, qp->q_cnt_2,
2332 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2333 len += snprintf(pbuffer+len,
2334 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2335 "\tCQID[%02d], "
2336 "QE-CNT[%04d], QE-SIZE[%04d], "
2337 "HOST-IDX[%04d], PORT-IDX[%04d]",
2338 qp->queue_id, qp->entry_count,
2339 qp->entry_size, qp->host_index,
2340 qp->hba_index);
2341
2342 /* Reset max counter */
2343 qp->CQ_max_cqe = 0;
2344
2345 len += snprintf(pbuffer+len,
2346 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2347 if (len >= max_cnt)
2348 goto too_big;
2349 }
2350
2351 /* OAS WQ */
2352 qp = phba->sli4_hba.oas_wq;
2353 if (qp) {
2354 len += snprintf(pbuffer+len,
2355 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2356 "\t\tOAS WQ info: ");
2357 len += snprintf(pbuffer+len,
2358 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2359 "AssocCQID[%02d]: "
2360 "WQ-STAT[oflow:x%x posted:x%llx]\n",
2361 qp->assoc_qid,
2362 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
2363 len += snprintf(pbuffer+len,
2364 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2365 "\t\tWQID[%02d], "
2366 "QE-CNT[%04d], QE-SIZE[%04d], "
2367 "HOST-IDX[%04d], PORT-IDX[%04d]",
2368 qp->queue_id,
2369 qp->entry_count,
2370 qp->entry_size,
2371 qp->host_index,
2372 qp->hba_index);
2373
2374 len += snprintf(pbuffer+len,
2375 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2376 if (len >= max_cnt)
2377 goto too_big;
2378 }
2379 }
2380out:
2283 spin_unlock_irq(&phba->hbalock); 2381 spin_unlock_irq(&phba->hbalock);
2284 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 2382 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2285 2383
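
The FOF/OAS queue dump above grows its buffer with repeated len += snprintf(pbuffer + len, SIZE - len, ...) and escapes to too_big once len reaches max_cnt. The guard matters because snprintf() returns the length it wanted to write, not what it actually wrote, so without the explicit check len could pass the buffer size and the next SIZE - len (a size_t) would wrap. A small demonstration of the idiom:

#include <stdio.h>

#define BUF_SZ 32

int main(void)
{
	char buf[BUF_SZ];
	int len = 0, i;

	for (i = 0; i < 8; i++) {
		len += snprintf(buf + len, sizeof(buf) - len,
				"Q%02d stat x%x\n", i, i * 3);
		if (len >= (int)sizeof(buf)) {	/* the "too_big" escape */
			fprintf(stderr, "truncated at entry %d\n", i);
			break;
		}
	}
	printf("%s", buf);	/* snprintf kept buf NUL-terminated */
	return 0;
}
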
@@ -3927,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
3927 struct lpfc_hba *phba = vport->phba; 4025 struct lpfc_hba *phba = vport->phba;
3928 char name[64]; 4026 char name[64];
3929 uint32_t num, i; 4027 uint32_t num, i;
4028 bool pport_setup = false;
3930 4029
3931 if (!lpfc_debugfs_enable) 4030 if (!lpfc_debugfs_enable)
3932 return; 4031 return;
@@ -3947,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
3947 /* Setup funcX directory for specific HBA PCI function */ 4046 /* Setup funcX directory for specific HBA PCI function */
3948 snprintf(name, sizeof(name), "fn%d", phba->brd_no); 4047 snprintf(name, sizeof(name), "fn%d", phba->brd_no);
3949 if (!phba->hba_debugfs_root) { 4048 if (!phba->hba_debugfs_root) {
4049 pport_setup = true;
3950 phba->hba_debugfs_root = 4050 phba->hba_debugfs_root =
3951 debugfs_create_dir(name, lpfc_debugfs_root); 4051 debugfs_create_dir(name, lpfc_debugfs_root);
3952 if (!phba->hba_debugfs_root) { 4052 if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
4239 } 4339 }
4240 4340
4241 /* 4341 /*
4342 * The following section is for additional directories/files for the
4343 * physical port.
4344 */
4345
4346 if (!pport_setup)
4347 goto debug_failed;
4348
4349 /*
4242 * iDiag debugfs root entry points for SLI4 device only 4350 * iDiag debugfs root entry points for SLI4 device only
4243 */ 4351 */
4244 if (phba->sli_rev < LPFC_SLI_REV4) 4352 if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5f728c..1a6fe524940d 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@ struct lpfc_nodelist {
116 atomic_t cmd_pending; 116 atomic_t cmd_pending;
117 uint32_t cmd_qdepth; 117 uint32_t cmd_qdepth;
118 unsigned long last_change_time; 118 unsigned long last_change_time;
119 struct lpfc_node_rrqs active_rrqs; 119 unsigned long *active_rrqs_xri_bitmap;
120 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 120 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
121}; 121};
122struct lpfc_node_rrq { 122struct lpfc_node_rrq {
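
In lpfc_disc.h the per-node RRQ tracking changes from an embedded fixed-size structure to a heap-allocated bitmap pointer (active_rrqs_xri_bitmap), drawn from the new active_rrq_pool and sized by cfg_rrq_xri_bitmap_sz, so per-node memory scales with the XRI count the adapter actually reports. A userspace model of such a one-bit-per-XRI map, with calloc() standing in for the mempool (helper names are illustrative):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *alloc_xri_bitmap(unsigned int max_xri)
{
	size_t words = (max_xri + BITS_PER_LONG - 1) / BITS_PER_LONG;

	return calloc(words, sizeof(unsigned long));	/* zeroed, like memset */
}

static void set_xri(unsigned long *map, unsigned int xri)
{
	map[xri / BITS_PER_LONG] |= 1UL << (xri % BITS_PER_LONG);
}

static int test_xri(const unsigned long *map, unsigned int xri)
{
	return !!(map[xri / BITS_PER_LONG] & (1UL << (xri % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long *map = alloc_xri_bitmap(4096);	/* runtime-sized */

	if (!map)
		return 1;
	set_xri(map, 100);
	printf("xri 100 active: %d\n", test_xri(map, 100));
	free(map);
	return 0;
}
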
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f0c58d..624fe0b3cc0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1516 uint32_t rc, keepDID = 0; 1516 uint32_t rc, keepDID = 0;
1517 int put_node; 1517 int put_node;
1518 int put_rport; 1518 int put_rport;
1519 struct lpfc_node_rrqs rrq; 1519 unsigned long *active_rrqs_xri_bitmap = NULL;
1520 1520
1521 /* Fabric nodes can have the same WWPN so we don't bother searching 1521 /* Fabric nodes can have the same WWPN so we don't bother searching
1522 * by WWPN. Just return the ndlp that was given to us. 1522 * by WWPN. Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1534 1534
1535 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) 1535 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1536 return ndlp; 1536 return ndlp;
1537 memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1537 if (phba->sli_rev == LPFC_SLI_REV4) {
1538 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1539 GFP_KERNEL);
1540 if (active_rrqs_xri_bitmap)
1541 memset(active_rrqs_xri_bitmap, 0,
1542 phba->cfg_rrq_xri_bitmap_sz);
1543 }
1538 1544
1539 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1545 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1540 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", 1546 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1543 if (!new_ndlp) { 1549 if (!new_ndlp) {
1544 rc = memcmp(&ndlp->nlp_portname, name, 1550 rc = memcmp(&ndlp->nlp_portname, name,
1545 sizeof(struct lpfc_name)); 1551 sizeof(struct lpfc_name));
1546 if (!rc) 1552 if (!rc) {
1553 if (active_rrqs_xri_bitmap)
1554 mempool_free(active_rrqs_xri_bitmap,
1555 phba->active_rrq_pool);
1547 return ndlp; 1556 return ndlp;
1557 }
1548 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 1558 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1549 if (!new_ndlp) 1559 if (!new_ndlp) {
1560 if (active_rrqs_xri_bitmap)
1561 mempool_free(active_rrqs_xri_bitmap,
1562 phba->active_rrq_pool);
1550 return ndlp; 1563 return ndlp;
1564 }
1551 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 1565 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1552 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { 1566 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1553 rc = memcmp(&ndlp->nlp_portname, name, 1567 rc = memcmp(&ndlp->nlp_portname, name,
1554 sizeof(struct lpfc_name)); 1568 sizeof(struct lpfc_name));
1555 if (!rc) 1569 if (!rc) {
1570 if (active_rrqs_xri_bitmap)
1571 mempool_free(active_rrqs_xri_bitmap,
1572 phba->active_rrq_pool);
1556 return ndlp; 1573 return ndlp;
1574 }
1557 new_ndlp = lpfc_enable_node(vport, new_ndlp, 1575 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1558 NLP_STE_UNUSED_NODE); 1576 NLP_STE_UNUSED_NODE);
1559 if (!new_ndlp) 1577 if (!new_ndlp) {
1578 if (active_rrqs_xri_bitmap)
1579 mempool_free(active_rrqs_xri_bitmap,
1580 phba->active_rrq_pool);
1560 return ndlp; 1581 return ndlp;
1582 }
1561 keepDID = new_ndlp->nlp_DID; 1583 keepDID = new_ndlp->nlp_DID;
1562 if (phba->sli_rev == LPFC_SLI_REV4) 1584 if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1563 memcpy(&rrq.xri_bitmap, 1585 memcpy(active_rrqs_xri_bitmap,
1564 &new_ndlp->active_rrqs.xri_bitmap, 1586 new_ndlp->active_rrqs_xri_bitmap,
1565 sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1587 phba->cfg_rrq_xri_bitmap_sz);
1566 } else { 1588 } else {
1567 keepDID = new_ndlp->nlp_DID; 1589 keepDID = new_ndlp->nlp_DID;
1568 if (phba->sli_rev == LPFC_SLI_REV4) 1590 if (phba->sli_rev == LPFC_SLI_REV4 &&
1569 memcpy(&rrq.xri_bitmap, 1591 active_rrqs_xri_bitmap)
1570 &new_ndlp->active_rrqs.xri_bitmap, 1592 memcpy(active_rrqs_xri_bitmap,
1571 sizeof(new_ndlp->active_rrqs.xri_bitmap)); 1593 new_ndlp->active_rrqs_xri_bitmap,
1594 phba->cfg_rrq_xri_bitmap_sz);
1572 } 1595 }
1573 1596
1574 lpfc_unreg_rpi(vport, new_ndlp); 1597 lpfc_unreg_rpi(vport, new_ndlp);
1575 new_ndlp->nlp_DID = ndlp->nlp_DID; 1598 new_ndlp->nlp_DID = ndlp->nlp_DID;
1576 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1599 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1577 if (phba->sli_rev == LPFC_SLI_REV4) 1600 if (phba->sli_rev == LPFC_SLI_REV4)
1578 memcpy(new_ndlp->active_rrqs.xri_bitmap, 1601 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1579 &ndlp->active_rrqs.xri_bitmap, 1602 ndlp->active_rrqs_xri_bitmap,
1580 sizeof(ndlp->active_rrqs.xri_bitmap)); 1603 phba->cfg_rrq_xri_bitmap_sz);
1581 1604
1582 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) 1605 if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1583 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1606 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1619 1642
1620 /* Two ndlps cannot have the same did on the nodelist */ 1643 /* Two ndlps cannot have the same did on the nodelist */
1621 ndlp->nlp_DID = keepDID; 1644 ndlp->nlp_DID = keepDID;
1622 if (phba->sli_rev == LPFC_SLI_REV4) 1645 if (phba->sli_rev == LPFC_SLI_REV4 &&
1623 memcpy(&ndlp->active_rrqs.xri_bitmap, 1646 active_rrqs_xri_bitmap)
1624 &rrq.xri_bitmap, 1647 memcpy(ndlp->active_rrqs_xri_bitmap,
1625 sizeof(ndlp->active_rrqs.xri_bitmap)); 1648 active_rrqs_xri_bitmap,
1649 phba->cfg_rrq_xri_bitmap_sz);
1626 lpfc_drop_node(vport, ndlp); 1650 lpfc_drop_node(vport, ndlp);
1627 } 1651 }
1628 else { 1652 else {
@@ -1634,10 +1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1634 1658
1635 /* Two ndlps cannot have the same did */ 1659 /* Two ndlps cannot have the same did */
1636 ndlp->nlp_DID = keepDID; 1660 ndlp->nlp_DID = keepDID;
1637 if (phba->sli_rev == LPFC_SLI_REV4) 1661 if (phba->sli_rev == LPFC_SLI_REV4 &&
1638 memcpy(&ndlp->active_rrqs.xri_bitmap, 1662 active_rrqs_xri_bitmap)
1639 &rrq.xri_bitmap, 1663 memcpy(ndlp->active_rrqs_xri_bitmap,
1640 sizeof(ndlp->active_rrqs.xri_bitmap)); 1664 active_rrqs_xri_bitmap,
1665 phba->cfg_rrq_xri_bitmap_sz);
1641 1666
1642 /* Since we are swapping the ndlp passed in with the new one 1667 /* Since we are swapping the ndlp passed in with the new one
1643 * and the did has already been swapped, copy over state. 1668 * and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1668 put_device(&rport->dev); 1693 put_device(&rport->dev);
1669 } 1694 }
1670 } 1695 }
1696 if (phba->sli_rev == LPFC_SLI_REV4 &&
1697 active_rrqs_xri_bitmap)
1698 mempool_free(active_rrqs_xri_bitmap,
1699 phba->active_rrq_pool);
1671 return new_ndlp; 1700 return new_ndlp;
1672} 1701}
1673 1702
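
[Note: the hunks above replace the fixed-size xri_bitmap embedded in every nodelist entry with a pointer, active_rrqs_xri_bitmap, drawn from a mempool sized at init time from max_xri. The swap performed by lpfc_plogi_confirm_nport reduces to the following pattern — a minimal sketch with simplified arguments, not the driver code itself; bitmap_sz stands in for phba->cfg_rrq_xri_bitmap_sz.]

    /* Sketch: swap two nodes' RRQ bitmaps through a pool-allocated
     * scratch buffer. On allocation failure the copy is skipped,
     * matching the "if (active_rrqs_xri_bitmap)" guards above. */
    static int swap_rrq_bitmaps(mempool_t *pool, size_t bitmap_sz,
                                unsigned long *a, unsigned long *b)
    {
            unsigned long *tmp = mempool_alloc(pool, GFP_KERNEL);

            if (!tmp)
                    return -ENOMEM;
            memcpy(tmp, a, bitmap_sz);   /* stash A's active RRQs   */
            memcpy(a, b, bitmap_sz);     /* A inherits B's bitmap   */
            memcpy(b, tmp, bitmap_sz);   /* B gets A's old bitmap   */
            mempool_free(tmp, pool);
            return 0;
    }
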
@@ -2772,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2772 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2801 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2773 * trigger the release of node. 2802 * trigger the release of node.
2774 */ 2803 */
2804
2775 lpfc_nlp_put(ndlp); 2805 lpfc_nlp_put(ndlp);
2776 return 0; 2806 return 0;
2777} 2807}
@@ -6193,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr)
6193 6223
6194 spin_lock_irqsave(&vport->work_port_lock, iflag); 6224 spin_lock_irqsave(&vport->work_port_lock, iflag);
6195 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 6225 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
6196 if (!tmo_posted) 6226 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
6197 vport->work_port_events |= WORKER_ELS_TMO; 6227 vport->work_port_events |= WORKER_ELS_TMO;
6198 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 6228 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
6199 6229
6200 if (!tmo_posted) 6230 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
6201 lpfc_worker_wake_up(phba); 6231 lpfc_worker_wake_up(phba);
6202 return; 6232 return;
6203} 6233}
@@ -6223,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6223 uint32_t els_command = 0; 6253 uint32_t els_command = 0;
6224 uint32_t timeout; 6254 uint32_t timeout;
6225 uint32_t remote_ID = 0xffffffff; 6255 uint32_t remote_ID = 0xffffffff;
6226 LIST_HEAD(txcmplq_completions);
6227 LIST_HEAD(abort_list); 6256 LIST_HEAD(abort_list);
6228 6257
6229 6258
6230 timeout = (uint32_t)(phba->fc_ratov << 1); 6259 timeout = (uint32_t)(phba->fc_ratov << 1);
6231 6260
6232 pring = &phba->sli.ring[LPFC_ELS_RING]; 6261 pring = &phba->sli.ring[LPFC_ELS_RING];
6233 6262 if ((phba->pport->load_flag & FC_UNLOADING))
6263 return;
6234 spin_lock_irq(&phba->hbalock); 6264 spin_lock_irq(&phba->hbalock);
6235 list_splice_init(&pring->txcmplq, &txcmplq_completions); 6265 if (phba->sli_rev == LPFC_SLI_REV4)
6236 spin_unlock_irq(&phba->hbalock); 6266 spin_lock(&pring->ring_lock);
6267
6268 if ((phba->pport->load_flag & FC_UNLOADING)) {
6269 if (phba->sli_rev == LPFC_SLI_REV4)
6270 spin_unlock(&pring->ring_lock);
6271 spin_unlock_irq(&phba->hbalock);
6272 return;
6273 }
6237 6274
6238 list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { 6275 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6239 cmd = &piocb->iocb; 6276 cmd = &piocb->iocb;
6240 6277
6241 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 6278 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6274 } 6311 }
6275 list_add_tail(&piocb->dlist, &abort_list); 6312 list_add_tail(&piocb->dlist, &abort_list);
6276 } 6313 }
6277 spin_lock_irq(&phba->hbalock); 6314 if (phba->sli_rev == LPFC_SLI_REV4)
6278 list_splice(&txcmplq_completions, &pring->txcmplq); 6315 spin_unlock(&pring->ring_lock);
6279 spin_unlock_irq(&phba->hbalock); 6316 spin_unlock_irq(&phba->hbalock);
6280 6317
6281 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 6318 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
6319 cmd = &piocb->iocb;
6282 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6320 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6283 "0127 ELS timeout Data: x%x x%x x%x " 6321 "0127 ELS timeout Data: x%x x%x x%x "
6284 "x%x\n", els_command, 6322 "x%x\n", els_command,
@@ -6290,8 +6328,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6290 } 6328 }
6291 6329
6292 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) 6330 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6293 mod_timer(&vport->els_tmofunc, 6331 if (!(phba->pport->load_flag & FC_UNLOADING))
6294 jiffies + msecs_to_jiffies(1000 * timeout)); 6332 mod_timer(&vport->els_tmofunc,
6333 jiffies + msecs_to_jiffies(1000 * timeout));
6295} 6334}
6296 6335
6297/** 6336/**
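
[Note: lpfc_els_timeout_handler now walks the txcmplq in place instead of splicing it onto a private list, which requires taking the SLI4 per-ring lock inside the hbalock. The locking shape, as a sketch rather than the literal driver code:]

    spin_lock_irq(&phba->hbalock);           /* outer lock, SLI3 and SLI4 */
    if (phba->sli_rev == LPFC_SLI_REV4)
            spin_lock(&pring->ring_lock);    /* inner lock, SLI4 only */
    /* ... walk pring->txcmplq and chain candidates via iocb->dlist ... */
    if (phba->sli_rev == LPFC_SLI_REV4)
            spin_unlock(&pring->ring_lock);
    spin_unlock_irq(&phba->hbalock);
    /* aborts are issued only after both locks have been dropped */
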
@@ -6317,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6317void 6356void
6318lpfc_els_flush_cmd(struct lpfc_vport *vport) 6357lpfc_els_flush_cmd(struct lpfc_vport *vport)
6319{ 6358{
6320 LIST_HEAD(completions); 6359 LIST_HEAD(abort_list);
6321 struct lpfc_hba *phba = vport->phba; 6360 struct lpfc_hba *phba = vport->phba;
6322 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6361 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6323 struct lpfc_iocbq *tmp_iocb, *piocb; 6362 struct lpfc_iocbq *tmp_iocb, *piocb;
6324 IOCB_t *cmd = NULL; 6363 IOCB_t *cmd = NULL;
6325 6364
6326 lpfc_fabric_abort_vport(vport); 6365 lpfc_fabric_abort_vport(vport);
6366 /*
6367 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
6368 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
6369 * ultimately grabs the ring_lock, the driver must splice the list into
6370 * a working list and release the locks before calling the abort.
6371 */
6372 spin_lock_irq(&phba->hbalock);
6373 if (phba->sli_rev == LPFC_SLI_REV4)
6374 spin_lock(&pring->ring_lock);
6375
6376 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6377 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6378 continue;
6379
6380 if (piocb->vport != vport)
6381 continue;
6382 list_add_tail(&piocb->dlist, &abort_list);
6383 }
6384 if (phba->sli_rev == LPFC_SLI_REV4)
6385 spin_unlock(&pring->ring_lock);
6386 spin_unlock_irq(&phba->hbalock);
6387 /* Abort each iocb on the aborted list and remove the dlist links. */
6388 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
6389 spin_lock_irq(&phba->hbalock);
6390 list_del_init(&piocb->dlist);
6391 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6392 spin_unlock_irq(&phba->hbalock);
6393 }
6394 if (!list_empty(&abort_list))
6395 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6396 "3387 abort list for txq not empty\n");
6397 INIT_LIST_HEAD(&abort_list);
6327 6398
6328 spin_lock_irq(&phba->hbalock); 6399 spin_lock_irq(&phba->hbalock);
6400 if (phba->sli_rev == LPFC_SLI_REV4)
6401 spin_lock(&pring->ring_lock);
6402
6329 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 6403 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6330 cmd = &piocb->iocb; 6404 cmd = &piocb->iocb;
6331 6405
@@ -6343,24 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
6343 if (piocb->vport != vport) 6417 if (piocb->vport != vport)
6344 continue; 6418 continue;
6345 6419
6346 list_move_tail(&piocb->list, &completions); 6420 list_del_init(&piocb->list);
6347 } 6421 list_add_tail(&piocb->list, &abort_list);
6348
6349 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6350 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
6351 continue;
6352 }
6353
6354 if (piocb->vport != vport)
6355 continue;
6356
6357 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6358 } 6422 }
6423 if (phba->sli_rev == LPFC_SLI_REV4)
6424 spin_unlock(&pring->ring_lock);
6359 spin_unlock_irq(&phba->hbalock); 6425 spin_unlock_irq(&phba->hbalock);
6360 6426
 6361 /* Cancel all the IOCBs from the completions list */ 6427 /* Cancel all the IOCBs from the completions list */
6362 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6428 lpfc_sli_cancel_iocbs(phba, &abort_list,
6363 IOERR_SLI_ABORTED); 6429 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
6364 6430
6365 return; 6431 return;
6366} 6432}
@@ -6385,35 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
6385void 6451void
6386lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 6452lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6387{ 6453{
6388 LIST_HEAD(completions); 6454 struct lpfc_vport *vport;
6389 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6455 list_for_each_entry(vport, &phba->port_list, listentry)
6390 struct lpfc_iocbq *tmp_iocb, *piocb; 6456 lpfc_els_flush_cmd(vport);
6391 IOCB_t *cmd = NULL;
6392
6393 lpfc_fabric_abort_hba(phba);
6394 spin_lock_irq(&phba->hbalock);
6395 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
6396 cmd = &piocb->iocb;
6397 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6398 continue;
6399 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
6400 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
6401 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
6402 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
6403 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6404 continue;
6405 list_move_tail(&piocb->list, &completions);
6406 }
6407 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6408 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
6409 continue;
6410 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
6411 }
6412 spin_unlock_irq(&phba->hbalock);
6413
6414 /* Cancel all the IOCBs from the completions list */
6415 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6416 IOERR_SLI_ABORTED);
6417 6457
6418 return; 6458 return;
6419} 6459}
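
[Note: this flush rework relies on each IOCB carrying two list_head members: "list" keeps the command linked on the ring's txq or txcmplq, while "dlist" threads the same command onto a transient abort list, so an abort can be issued without first unlinking the command from the ring. lpfc_els_flush_all_cmd correspondingly collapses into a per-vport loop over phba->port_list. A minimal sketch of the double linkage — struct simplified, not the real lpfc_iocbq layout:]

    struct iocbq_sketch {
            struct list_head list;   /* permanent txq/txcmplq linkage */
            struct list_head dlist;  /* transient abort-list linkage  */
    };
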
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d9f237..59b51c529ba0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba)
674 lpfc_fdmi_timeout_handler(vport); 674 lpfc_fdmi_timeout_handler(vport);
675 if (work_port_events & WORKER_RAMP_DOWN_QUEUE) 675 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
676 lpfc_ramp_down_queue_handler(phba); 676 lpfc_ramp_down_queue_handler(phba);
677 if (work_port_events & WORKER_RAMP_UP_QUEUE)
678 lpfc_ramp_up_queue_handler(phba);
679 if (work_port_events & WORKER_DELAYED_DISC_TMO) 677 if (work_port_events & WORKER_DELAYED_DISC_TMO)
680 lpfc_delayed_disc_timeout_handler(vport); 678 lpfc_delayed_disc_timeout_handler(vport);
681 } 679 }
@@ -2545,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2545 if (!new_fcf_record) { 2543 if (!new_fcf_record) {
2546 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2544 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2547 "2766 Mailbox command READ_FCF_RECORD " 2545 "2766 Mailbox command READ_FCF_RECORD "
2548 "failed to retrieve a FCF record.\n"); 2546 "failed to retrieve a FCF record. "
2549 goto error_out; 2547 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2548 phba->fcf.fcf_flag);
2549 lpfc_unregister_fcf_rescan(phba);
2550 goto out;
2550 } 2551 }
2551 2552
2552 /* Get the needed parameters from FCF record */ 2553 /* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
3973 vport->fc_map_cnt += count; 3974 vport->fc_map_cnt += count;
3974 break; 3975 break;
3975 case NLP_STE_NPR_NODE: 3976 case NLP_STE_NPR_NODE:
3976 vport->fc_npr_cnt += count; 3977 if (vport->fc_npr_cnt == 0 && count == -1)
3978 vport->fc_npr_cnt = 0;
3979 else
3980 vport->fc_npr_cnt += count;
3977 break; 3981 break;
3978 } 3982 }
3979 spin_unlock_irq(shost->host_lock); 3983 spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4180 struct lpfc_hba *phba = vport->phba; 4184 struct lpfc_hba *phba = vport->phba;
4181 uint32_t did; 4185 uint32_t did;
4182 unsigned long flags; 4186 unsigned long flags;
4187 unsigned long *active_rrqs_xri_bitmap = NULL;
4183 4188
4184 if (!ndlp) 4189 if (!ndlp)
4185 return NULL; 4190 return NULL;
@@ -4208,12 +4213,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4208 4213
4209 /* Keep the original DID */ 4214 /* Keep the original DID */
4210 did = ndlp->nlp_DID; 4215 did = ndlp->nlp_DID;
4216 if (phba->sli_rev == LPFC_SLI_REV4)
4217 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4211 4218
 4212 /* re-initialize ndlp except for its linked list pointer */ 4219 /* re-initialize ndlp except for its linked list pointer */
4213 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 4220 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4214 sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 4221 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4215 lpfc_initialize_node(vport, ndlp, did); 4222 lpfc_initialize_node(vport, ndlp, did);
4216 4223
4224 if (phba->sli_rev == LPFC_SLI_REV4)
4225 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4226
4217 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4227 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4218 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4228 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4219 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 4229 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
@@ -4799,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
4799 ((uint32_t) ndlp->nlp_rpi & 0xff)); 4809 ((uint32_t) ndlp->nlp_rpi & 0xff));
4800 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4810 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4801 "0929 FIND node DID " 4811 "0929 FIND node DID "
4802 "Data: x%p x%x x%x x%x\n", 4812 "Data: x%p x%x x%x x%x %p\n",
4803 ndlp, ndlp->nlp_DID, 4813 ndlp, ndlp->nlp_DID,
4804 ndlp->nlp_flag, data1); 4814 ndlp->nlp_flag, data1,
4815 ndlp->active_rrqs_xri_bitmap);
4805 return ndlp; 4816 return ndlp;
4806 } 4817 }
4807 } 4818 }
@@ -5618,8 +5629,13 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5618 5629
5619 lpfc_initialize_node(vport, ndlp, did); 5630 lpfc_initialize_node(vport, ndlp, did);
5620 INIT_LIST_HEAD(&ndlp->nlp_listp); 5631 INIT_LIST_HEAD(&ndlp->nlp_listp);
5621 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5632 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5622 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 5633 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
5634 ndlp->active_rrqs_xri_bitmap =
5635 mempool_alloc(vport->phba->active_rrq_pool,
5636 GFP_KERNEL);
5637 }
5638
5623 5639
5624 5640
5625 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 5641 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@ lpfc_nlp_release(struct kref *kref)
5664 /* free ndlp memory for final ndlp release */ 5680 /* free ndlp memory for final ndlp release */
5665 if (NLP_CHK_FREE_REQ(ndlp)) { 5681 if (NLP_CHK_FREE_REQ(ndlp)) {
5666 kfree(ndlp->lat_data); 5682 kfree(ndlp->lat_data);
5683 if (phba->sli_rev == LPFC_SLI_REV4)
5684 mempool_free(ndlp->active_rrqs_xri_bitmap,
5685 ndlp->phba->active_rrq_pool);
5667 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); 5686 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
5668 } 5687 }
5669} 5688}
@@ -6170,10 +6189,6 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6170 6189
6171 memcpy(&conn_entry->conn_rec, &conn_rec[i], 6190 memcpy(&conn_entry->conn_rec, &conn_rec[i],
6172 sizeof(struct lpfc_fcf_conn_rec)); 6191 sizeof(struct lpfc_fcf_conn_rec));
6173 conn_entry->conn_rec.vlan_tag =
6174 conn_entry->conn_rec.vlan_tag;
6175 conn_entry->conn_rec.flags =
6176 conn_entry->conn_rec.flags;
6177 list_add_tail(&conn_entry->list, 6192 list_add_tail(&conn_entry->list,
6178 &phba->fcf_conn_rec_list); 6193 &phba->fcf_conn_rec_list);
6179 } 6194 }
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d30ca69..3d9438ce59ab 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ 45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ 46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
47#define LPFC_FCP_NEXT_RING 3 47#define LPFC_FCP_NEXT_RING 3
48#define LPFC_FCP_OAS_RING 3
48 49
49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ 50#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ 51#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b116d328..fd79f7de7666 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
2616#define cfg_phwq_SHIFT 15 2616#define cfg_phwq_SHIFT 15
2617#define cfg_phwq_MASK 0x00000001 2617#define cfg_phwq_MASK 0x00000001
2618#define cfg_phwq_WORD word12 2618#define cfg_phwq_WORD word12
2619#define cfg_oas_SHIFT 25
2620#define cfg_oas_MASK 0x00000001
2621#define cfg_oas_WORD word12
2619#define cfg_loopbk_scope_SHIFT 28 2622#define cfg_loopbk_scope_SHIFT 28
2620#define cfg_loopbk_scope_MASK 0x0000000f 2623#define cfg_loopbk_scope_MASK 0x0000000f
2621#define cfg_loopbk_scope_WORD word12 2624#define cfg_loopbk_scope_WORD word12
@@ -3322,6 +3325,9 @@ struct wqe_common {
3322#define wqe_ebde_cnt_SHIFT 0 3325#define wqe_ebde_cnt_SHIFT 0
3323#define wqe_ebde_cnt_MASK 0x0000000f 3326#define wqe_ebde_cnt_MASK 0x0000000f
3324#define wqe_ebde_cnt_WORD word10 3327#define wqe_ebde_cnt_WORD word10
3328#define wqe_oas_SHIFT 6
3329#define wqe_oas_MASK 0x00000001
3330#define wqe_oas_WORD word10
3325#define wqe_lenloc_SHIFT 7 3331#define wqe_lenloc_SHIFT 7
3326#define wqe_lenloc_MASK 0x00000003 3332#define wqe_lenloc_MASK 0x00000003
3327#define wqe_lenloc_WORD word10 3333#define wqe_lenloc_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc85c35..635eeb3d6987 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
80static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 80static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
81static void lpfc_sli4_disable_intr(struct lpfc_hba *); 81static void lpfc_sli4_disable_intr(struct lpfc_hba *);
82static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 82static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
83static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
83 84
84static struct scsi_transport_template *lpfc_transport_template = NULL; 85static struct scsi_transport_template *lpfc_transport_template = NULL;
85static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 86static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@ lpfc_rrq_timeout(unsigned long ptr)
1005 1006
1006 phba = (struct lpfc_hba *)ptr; 1007 phba = (struct lpfc_hba *)ptr;
1007 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1008 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1008 phba->hba_flag |= HBA_RRQ_ACTIVE; 1009 if (!(phba->pport->load_flag & FC_UNLOADING))
1010 phba->hba_flag |= HBA_RRQ_ACTIVE;
1011 else
1012 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1009 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1013 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1010 lpfc_worker_wake_up(phba); 1014
1015 if (!(phba->pport->load_flag & FC_UNLOADING))
1016 lpfc_worker_wake_up(phba);
1011} 1017}
1012 1018
1013/** 1019/**
@@ -1468,7 +1474,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1468 * for handling possible port resource change. 1474 * for handling possible port resource change.
1469 **/ 1475 **/
1470static int 1476static int
1471lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action) 1477lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1478 bool en_rn_msg)
1472{ 1479{
1473 int rc; 1480 int rc;
1474 uint32_t intr_mode; 1481 uint32_t intr_mode;
@@ -1480,9 +1487,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
1480 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1487 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1481 if (!rc) { 1488 if (!rc) {
1482 /* need reset: attempt for port recovery */ 1489 /* need reset: attempt for port recovery */
1483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1490 if (en_rn_msg)
1484 "2887 Reset Needed: Attempting Port " 1491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1485 "Recovery...\n"); 1492 "2887 Reset Needed: Attempting Port "
1493 "Recovery...\n");
1486 lpfc_offline_prep(phba, mbx_action); 1494 lpfc_offline_prep(phba, mbx_action);
1487 lpfc_offline(phba); 1495 lpfc_offline(phba);
1488 /* release interrupt for possible resource change */ 1496 /* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1522 uint32_t reg_err1, reg_err2; 1530 uint32_t reg_err1, reg_err2;
1523 uint32_t uerrlo_reg, uemasklo_reg; 1531 uint32_t uerrlo_reg, uemasklo_reg;
1524 uint32_t pci_rd_rc1, pci_rd_rc2; 1532 uint32_t pci_rd_rc1, pci_rd_rc2;
1533 bool en_rn_msg = true;
1525 int rc; 1534 int rc;
1526 1535
1527 /* If the pci channel is offline, ignore possible errors, since 1536 /* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1572 break; 1581 break;
1573 } 1582 }
1574 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1583 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1575 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) 1584 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1577 "3143 Port Down: Firmware Restarted\n"); 1586 "3143 Port Down: Firmware Update "
1578 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1587 "Detected\n");
1588 en_rn_msg = false;
1589 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1579 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1590 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1581 "3144 Port Down: Debug Dump\n"); 1592 "3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1585 "3145 Port Down: Provisioning\n"); 1596 "3145 Port Down: Provisioning\n");
1586 1597
1587 /* Check port status register for function reset */ 1598 /* Check port status register for function reset */
1588 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT); 1599 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1600 en_rn_msg);
1589 if (rc == 0) { 1601 if (rc == 0) {
1590 /* don't report event on forced debug dump */ 1602 /* don't report event on forced debug dump */
1591 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1603 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4856 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4868 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4857 struct lpfc_mqe *mqe; 4869 struct lpfc_mqe *mqe;
4858 int longs; 4870 int longs;
4871 int fof_vectors = 0;
4859 4872
4860 /* Get all the module params for configuring this host */ 4873 /* Get all the module params for configuring this host */
4861 lpfc_get_cfgparam(phba); 4874 lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5061 rc = lpfc_sli4_read_config(phba); 5074 rc = lpfc_sli4_read_config(phba);
5062 if (unlikely(rc)) 5075 if (unlikely(rc))
5063 goto out_free_bsmbx; 5076 goto out_free_bsmbx;
5077 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5078 if (unlikely(rc))
5079 goto out_free_bsmbx;
5064 5080
5065 /* IF Type 0 ports get initialized now. */ 5081 /* IF Type 0 ports get initialized now. */
5066 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5082 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5118 } 5134 }
5119 } 5135 }
5120 mempool_free(mboxq, phba->mbox_mem_pool); 5136 mempool_free(mboxq, phba->mbox_mem_pool);
5137
5138 /* Verify OAS is supported */
5139 lpfc_sli4_oas_verify(phba);
5140 if (phba->cfg_fof)
5141 fof_vectors = 1;
5142
5121 /* Verify all the SLI4 queues */ 5143 /* Verify all the SLI4 queues */
5122 rc = lpfc_sli4_queue_verify(phba); 5144 rc = lpfc_sli4_queue_verify(phba);
5123 if (rc) 5145 if (rc)
@@ -5159,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5159 5181
5160 phba->sli4_hba.fcp_eq_hdl = 5182 phba->sli4_hba.fcp_eq_hdl =
5161 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5183 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5162 phba->cfg_fcp_io_channel), GFP_KERNEL); 5184 (fof_vectors + phba->cfg_fcp_io_channel)),
5185 GFP_KERNEL);
5163 if (!phba->sli4_hba.fcp_eq_hdl) { 5186 if (!phba->sli4_hba.fcp_eq_hdl) {
5164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5165 "2572 Failed allocate memory for " 5188 "2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5169 } 5192 }
5170 5193
5171 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5194 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5172 phba->cfg_fcp_io_channel), GFP_KERNEL); 5195 (fof_vectors +
5196 phba->cfg_fcp_io_channel)), GFP_KERNEL);
5173 if (!phba->sli4_hba.msix_entries) { 5197 if (!phba->sli4_hba.msix_entries) {
5174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5175 "2573 Failed allocate memory for msi-x " 5199 "2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5267 kfree(phba->sli4_hba.cpu_map); 5291 kfree(phba->sli4_hba.cpu_map);
5268 phba->sli4_hba.num_present_cpu = 0; 5292 phba->sli4_hba.num_present_cpu = 0;
5269 phba->sli4_hba.num_online_cpu = 0; 5293 phba->sli4_hba.num_online_cpu = 0;
5294 phba->sli4_hba.curr_disp_cpu = 0;
5270 5295
5271 /* Free memory allocated for msi-x interrupt vector entries */ 5296 /* Free memory allocated for msi-x interrupt vector entries */
5272 kfree(phba->sli4_hba.msix_entries); 5297 kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5390 /* Initialize FCF connection rec list */ 5415 /* Initialize FCF connection rec list */
5391 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5416 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5392 5417
5418 /* Initialize OAS configuration list */
5419 spin_lock_init(&phba->devicelock);
5420 INIT_LIST_HEAD(&phba->luns);
5421
5393 return 0; 5422 return 0;
5394} 5423}
5395 5424
@@ -6816,6 +6845,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6816 int cfg_fcp_io_channel; 6845 int cfg_fcp_io_channel;
6817 uint32_t cpu; 6846 uint32_t cpu;
6818 uint32_t i = 0; 6847 uint32_t i = 0;
6848 int fof_vectors = phba->cfg_fof ? 1 : 0;
6819 6849
6820 /* 6850 /*
6821 * Sanity check for configured queue parameters against the run-time 6851 * Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6832 } 6862 }
6833 phba->sli4_hba.num_online_cpu = i; 6863 phba->sli4_hba.num_online_cpu = i;
6834 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 6864 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6865 phba->sli4_hba.curr_disp_cpu = 0;
6835 6866
6836 if (i < cfg_fcp_io_channel) { 6867 if (i < cfg_fcp_io_channel) {
6837 lpfc_printf_log(phba, 6868 lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6842 cfg_fcp_io_channel = i; 6873 cfg_fcp_io_channel = i;
6843 } 6874 }
6844 6875
6845 if (cfg_fcp_io_channel > 6876 if (cfg_fcp_io_channel + fof_vectors >
6846 phba->sli4_hba.max_cfg_param.max_eq) { 6877 phba->sli4_hba.max_cfg_param.max_eq) {
6847 if (phba->sli4_hba.max_cfg_param.max_eq < 6878 if (phba->sli4_hba.max_cfg_param.max_eq <
6848 LPFC_FCP_IO_CHAN_MIN) { 6879 LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6859 "available EQs: from %d to %d\n", 6890 "available EQs: from %d to %d\n",
6860 cfg_fcp_io_channel, 6891 cfg_fcp_io_channel,
6861 phba->sli4_hba.max_cfg_param.max_eq); 6892 phba->sli4_hba.max_cfg_param.max_eq);
6862 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6893 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
6894 fof_vectors;
6863 } 6895 }
6864 6896
6865 /* The actual number of FCP event queues adopted */ 6897 /* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
7070 } 7102 }
7071 phba->sli4_hba.dat_rq = qdesc; 7103 phba->sli4_hba.dat_rq = qdesc;
7072 7104
7105 /* Create the Queues needed for Flash Optimized Fabric operations */
7106 if (phba->cfg_fof)
7107 lpfc_fof_queue_create(phba);
7073 return 0; 7108 return 0;
7074 7109
7075out_error: 7110out_error:
@@ -7094,6 +7129,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7094{ 7129{
7095 int idx; 7130 int idx;
7096 7131
7132 if (phba->cfg_fof)
7133 lpfc_fof_queue_destroy(phba);
7134
7097 if (phba->sli4_hba.hba_eq != NULL) { 7135 if (phba->sli4_hba.hba_eq != NULL) {
7098 /* Release HBA event queue */ 7136 /* Release HBA event queue */
7099 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7137 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7478 phba->sli4_hba.hdr_rq->queue_id, 7516 phba->sli4_hba.hdr_rq->queue_id,
7479 phba->sli4_hba.dat_rq->queue_id, 7517 phba->sli4_hba.dat_rq->queue_id,
7480 phba->sli4_hba.els_cq->queue_id); 7518 phba->sli4_hba.els_cq->queue_id);
7519
7520 if (phba->cfg_fof) {
7521 rc = lpfc_fof_queue_setup(phba);
7522 if (rc) {
7523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7524 "0549 Failed setup of FOF Queues: "
7525 "rc = 0x%x\n", rc);
7526 goto out_destroy_els_rq;
7527 }
7528 }
7481 return 0; 7529 return 0;
7482 7530
7531out_destroy_els_rq:
7532 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7483out_destroy_els_wq: 7533out_destroy_els_wq:
7484 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7534 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7485out_destroy_mbx_wq: 7535out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7518{ 7568{
7519 int fcp_qidx; 7569 int fcp_qidx;
7520 7570
7571 /* Unset the queues created for Flash Optimized Fabric operations */
7572 if (phba->cfg_fof)
7573 lpfc_fof_queue_destroy(phba);
7521 /* Unset mailbox command work queue */ 7574 /* Unset mailbox command work queue */
7522 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7575 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7523 /* Unset ELS work queue */ 7576 /* Unset ELS work queue */
@@ -8635,6 +8688,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8635 8688
8636 /* Configure MSI-X capability structure */ 8689 /* Configure MSI-X capability structure */
8637 vectors = phba->cfg_fcp_io_channel; 8690 vectors = phba->cfg_fcp_io_channel;
8691 if (phba->cfg_fof) {
8692 phba->sli4_hba.msix_entries[index].entry = index;
8693 vectors++;
8694 }
8638enable_msix_vectors: 8695enable_msix_vectors:
8639 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 8696 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8640 vectors); 8697 vectors);
@@ -8664,7 +8721,15 @@ enable_msix_vectors:
8664 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 8721 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8665 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8722 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8666 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 8723 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8667 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8724 if (phba->cfg_fof && (index == (vectors - 1)))
8725 rc = request_irq(
8726 phba->sli4_hba.msix_entries[index].vector,
8727 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
8728 (char *)&phba->sli4_hba.handler_name[index],
8729 &phba->sli4_hba.fcp_eq_hdl[index]);
8730 else
8731 rc = request_irq(
8732 phba->sli4_hba.msix_entries[index].vector,
8668 &lpfc_sli4_hba_intr_handler, IRQF_SHARED, 8733 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8669 (char *)&phba->sli4_hba.handler_name[index], 8734 (char *)&phba->sli4_hba.handler_name[index],
8670 &phba->sli4_hba.fcp_eq_hdl[index]); 8735 &phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@ enable_msix_vectors:
8676 } 8741 }
8677 } 8742 }
8678 8743
8744 if (phba->cfg_fof)
8745 vectors--;
8746
8679 if (vectors != phba->cfg_fcp_io_channel) { 8747 if (vectors != phba->cfg_fcp_io_channel) {
8680 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8681 "3238 Reducing IO channels to match number of " 8749 "3238 Reducing IO channels to match number of "
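
[Note: the MSI-X changes assume a fixed vector layout: the optional FOF vector is appended as the last entry after the cfg_fcp_io_channel vectors, which is why the request_irq() branch keys on index == vectors - 1 and why vectors is decremented again before being compared with the IO channel count. A sketch of that assumption, with a hypothetical helper that is not in the driver:]

    /* Vector layout: [0 .. n-1] FCP IO channels -> hba intr handler,
     *                [n]        optional FOF    -> fof intr handler. */
    static inline bool vector_is_fof(struct lpfc_hba *phba, int index,
                                     int vectors)
    {
            return phba->cfg_fof && index == vectors - 1;
    }
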
@@ -8721,7 +8789,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8721 free_irq(phba->sli4_hba.msix_entries[index].vector, 8789 free_irq(phba->sli4_hba.msix_entries[index].vector,
8722 &phba->sli4_hba.fcp_eq_hdl[index]); 8790 &phba->sli4_hba.fcp_eq_hdl[index]);
8723 } 8791 }
8724 8792 if (phba->cfg_fof) {
8793 free_irq(phba->sli4_hba.msix_entries[index].vector,
8794 &phba->sli4_hba.fcp_eq_hdl[index]);
8795 }
8725 /* Disable MSI-X */ 8796 /* Disable MSI-X */
8726 pci_disable_msix(phba->pcidev); 8797 pci_disable_msix(phba->pcidev);
8727 8798
@@ -8771,6 +8842,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8771 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 8842 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8772 } 8843 }
8773 8844
8845 if (phba->cfg_fof) {
8846 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8847 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8848 }
8774 return 0; 8849 return 0;
8775} 8850}
8776 8851
@@ -8853,6 +8928,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8853 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 8928 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8854 fcp_eq_in_use, 1); 8929 fcp_eq_in_use, 1);
8855 } 8930 }
8931 if (phba->cfg_fof) {
8932 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8933 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8934 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8935 fcp_eq_in_use, 1);
8936 }
8856 } 8937 }
8857 } 8938 }
8858 return intr_mode; 8939 return intr_mode;
@@ -9163,6 +9244,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9163 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 9244 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9164 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 9245 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9165 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 9246 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9247 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9166 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 9248 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9167 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9249 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9168 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9250 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@ lpfc_io_resume(struct pci_dev *pdev)
10796 return; 10878 return;
10797} 10879}
10798 10880
10881/**
10882 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
10883 * @phba: pointer to lpfc hba data structure.
10884 *
10885 * This routine checks to see if OAS is supported for this adapter. If
 10886 * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
 10887 * Otherwise, the OAS enable flag is cleared and the pool created for OAS device data
10888 * is destroyed.
10889 *
10890 **/
10891void
10892lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10893{
10894
10895 if (!phba->cfg_EnableXLane)
10896 return;
10897
10898 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10899 phba->cfg_fof = 1;
10900 } else {
10901 phba->cfg_EnableXLane = 0;
10902 if (phba->device_data_mem_pool)
10903 mempool_destroy(phba->device_data_mem_pool);
10904 phba->device_data_mem_pool = NULL;
10905 }
10906
10907 return;
10908}
10909
10910/**
10911 * lpfc_fof_queue_setup - Set up all the fof queues
10912 * @phba: pointer to lpfc hba data structure.
10913 *
10914 * This routine is invoked to set up all the fof queues for the FC HBA
10915 * operation.
10916 *
10917 * Return codes
10918 * 0 - successful
10919 * -ENOMEM - No available memory
10920 **/
10921int
10922lpfc_fof_queue_setup(struct lpfc_hba *phba)
10923{
10924 struct lpfc_sli *psli = &phba->sli;
10925 int rc;
10926
10927 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
10928 if (rc)
10929 return -ENOMEM;
10930
10931 if (phba->cfg_EnableXLane) {
10932
10933 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
10934 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
10935 if (rc)
10936 goto out_oas_cq;
10937
10938 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
10939 phba->sli4_hba.oas_cq, LPFC_FCP);
10940 if (rc)
10941 goto out_oas_wq;
10942
10943 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
10944 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
10945 }
10946
10947 return 0;
10948
10949out_oas_wq:
10950 if (phba->cfg_EnableXLane)
10951 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10952out_oas_cq:
10953 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
10954 return rc;
10955
10956}
10957
10958/**
10959 * lpfc_fof_queue_create - Create all the fof queues
10960 * @phba: pointer to lpfc hba data structure.
10961 *
10962 * This routine is invoked to allocate all the fof queues for the FC HBA
10963 * operation. For each SLI4 queue type, the parameters such as queue entry
10964 * count (queue depth) shall be taken from the module parameter. For now,
 10965 * we just use some constant number as a placeholder.
10966 *
10967 * Return codes
10968 * 0 - successful
 10969 * -ENOMEM - No available memory
10970 * -EIO - The mailbox failed to complete successfully.
10971 **/
10972int
10973lpfc_fof_queue_create(struct lpfc_hba *phba)
10974{
10975 struct lpfc_queue *qdesc;
10976
10977 /* Create FOF EQ */
10978 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
10979 phba->sli4_hba.eq_ecount);
10980 if (!qdesc)
10981 goto out_error;
10982
10983 phba->sli4_hba.fof_eq = qdesc;
10984
10985 if (phba->cfg_EnableXLane) {
10986
10987 /* Create OAS CQ */
10988 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
10989 phba->sli4_hba.cq_ecount);
10990 if (!qdesc)
10991 goto out_error;
10992
10993 phba->sli4_hba.oas_cq = qdesc;
10994
10995 /* Create OAS WQ */
10996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
10997 phba->sli4_hba.wq_ecount);
10998 if (!qdesc)
10999 goto out_error;
11000
11001 phba->sli4_hba.oas_wq = qdesc;
11002
11003 }
11004 return 0;
11005
11006out_error:
11007 lpfc_fof_queue_destroy(phba);
11008 return -ENOMEM;
11009}
11010
11011/**
11012 * lpfc_fof_queue_destroy - Destroy all the fof queues
11013 * @phba: pointer to lpfc hba data structure.
11014 *
 11015 * This routine is invoked to release all the SLI4 queues created for the FC HBA
11016 * operation.
11017 *
11018 * Return codes
11019 * 0 - successful
11020 **/
11021int
11022lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11023{
11024 /* Release FOF Event queue */
11025 if (phba->sli4_hba.fof_eq != NULL) {
11026 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11027 phba->sli4_hba.fof_eq = NULL;
11028 }
11029
11030 /* Release OAS Completion queue */
11031 if (phba->sli4_hba.oas_cq != NULL) {
11032 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11033 phba->sli4_hba.oas_cq = NULL;
11034 }
11035
11036 /* Release OAS Work queue */
11037 if (phba->sli4_hba.oas_wq != NULL) {
11038 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11039 phba->sli4_hba.oas_wq = NULL;
11040 }
11041 return 0;
11042}
11043
10799static struct pci_device_id lpfc_id_table[] = { 11044static struct pci_device_id lpfc_id_table[] = {
10800 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 11045 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10801 PCI_ANY_ID, PCI_ANY_ID, }, 11046 PCI_ANY_ID, PCI_ANY_ID, },
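
[Note: lpfc_fof_queue_destroy() NULL-checks and clears each queue pointer, so lpfc_fof_queue_create() can safely call it from out_error even when only some of the three queues were allocated. The idiom, sketched with a hypothetical helper:]

    /* Idempotent teardown: freeing and NULLing together lets a
     * partially built queue set be torn down with a single call. */
    static void free_fof_queue(struct lpfc_hba *phba, struct lpfc_queue **q)
    {
            if (*q) {
                    lpfc_sli4_queue_free(*q);
                    *q = NULL;
            }
    }
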
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd7c86d..ed419aad2b1f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
38#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
39#include "lpfc.h" 39#include "lpfc.h"
40#include "lpfc_crtn.h" 40#include "lpfc_crtn.h"
41#include "lpfc_logmsg.h"
41 42
42#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 43#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
43#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 44#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
45#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
44 46
47int
48lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
49 size_t bytes;
50 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
51
52 if (max_xri <= 0)
53 return -ENOMEM;
54 bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
55 sizeof(unsigned long);
56 phba->cfg_rrq_xri_bitmap_sz = bytes;
57 phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
58 bytes);
59 if (!phba->active_rrq_pool)
60 return -ENOMEM;
61 else
62 return 0;
63}
45 64
46/** 65/**
47 * lpfc_mem_alloc - create and allocate all PCI and memory pools 66 * lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
146 phba->lpfc_drb_pool = NULL; 165 phba->lpfc_drb_pool = NULL;
147 } 166 }
148 167
168 if (phba->cfg_EnableXLane) {
169 phba->device_data_mem_pool = mempool_create_kmalloc_pool(
170 LPFC_DEVICE_DATA_POOL_SIZE,
171 sizeof(struct lpfc_device_data));
172 if (!phba->device_data_mem_pool)
173 goto fail_free_hrb_pool;
174 } else {
175 phba->device_data_mem_pool = NULL;
176 }
177
149 return 0; 178 return 0;
150 fail_free_hrb_pool: 179 fail_free_hrb_pool:
151 pci_pool_destroy(phba->lpfc_hrb_pool); 180 pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
188{ 217{
189 int i; 218 int i;
190 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 219 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
220 struct lpfc_device_data *device_data;
191 221
192 /* Free HBQ pools */ 222 /* Free HBQ pools */
193 lpfc_sli_hbqbuf_free_all(phba); 223 lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
209 /* Free NLP memory pool */ 239 /* Free NLP memory pool */
210 mempool_destroy(phba->nlp_mem_pool); 240 mempool_destroy(phba->nlp_mem_pool);
211 phba->nlp_mem_pool = NULL; 241 phba->nlp_mem_pool = NULL;
242 if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
243 mempool_destroy(phba->active_rrq_pool);
244 phba->active_rrq_pool = NULL;
245 }
212 246
213 /* Free mbox memory pool */ 247 /* Free mbox memory pool */
214 mempool_destroy(phba->mbox_mem_pool); 248 mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
227 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 261 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
228 phba->lpfc_scsi_dma_buf_pool = NULL; 262 phba->lpfc_scsi_dma_buf_pool = NULL;
229 263
264 /* Free Device Data memory pool */
265 if (phba->device_data_mem_pool) {
266 /* Ensure all objects have been returned to the pool */
267 while (!list_empty(&phba->luns)) {
268 device_data = list_first_entry(&phba->luns,
269 struct lpfc_device_data,
270 listentry);
271 list_del(&device_data->listentry);
272 mempool_free(device_data, phba->device_data_mem_pool);
273 }
274 mempool_destroy(phba->device_data_mem_pool);
275 }
276 phba->device_data_mem_pool = NULL;
230 return; 277 return;
231} 278}
232 279
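
[Note: the sizing arithmetic in lpfc_mem_alloc_active_rrq_pool_s4() rounds max_xri bits up to a whole number of longs; the kernel's BITS_TO_LONGS() helper expresses the same computation. For example, max_xri = 1024 on a 64-bit build gives 16 longs, i.e. 128 bytes per bitmap. As a sketch:]

    #include <linux/bitops.h>

    /* Equivalent to ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
     * sizeof(unsigned long) as computed above. */
    size_t bytes = BITS_TO_LONGS(max_xri) * sizeof(unsigned long);
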
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc361259d6d..c342f6afd747 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
203int 203int
204lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 204lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205{ 205{
206 LIST_HEAD(completions);
207 LIST_HEAD(txcmplq_completions);
208 LIST_HEAD(abort_list); 206 LIST_HEAD(abort_list);
209 struct lpfc_sli *psli = &phba->sli; 207 struct lpfc_sli *psli = &phba->sli;
210 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 208 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
216 "Data: x%x x%x x%x\n", 214 "Data: x%x x%x x%x\n",
217 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 215 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
218 ndlp->nlp_rpi); 216 ndlp->nlp_rpi);
219 217 /* Clean up all fabric IOs first.*/
220 lpfc_fabric_abort_nport(ndlp); 218 lpfc_fabric_abort_nport(ndlp);
221 219
222 /* First check the txq */ 220 /*
221 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
222 * of all ELS IOs that need an ABTS. The IOs need to stay on the
223 * txcmplq so that the abort operation completes them successfully.
224 */
223 spin_lock_irq(&phba->hbalock); 225 spin_lock_irq(&phba->hbalock);
224 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 226 if (phba->sli_rev == LPFC_SLI_REV4)
225 /* Check to see if iocb matches the nport we are looking for */ 227 spin_lock(&pring->ring_lock);
226 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 228 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 227 /* It matches, so deque and call compl with anp error */ 229 /* Add to abort_list on NDLP match. */
228 list_move_tail(&iocb->list, &completions);
229 }
230 }
231
232 /* Next check the txcmplq */
233 list_splice_init(&pring->txcmplq, &txcmplq_completions);
234 spin_unlock_irq(&phba->hbalock);
235
236 list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
237 /* Check to see if iocb matches the nport we are looking for */
238 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) 230 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
239 list_add_tail(&iocb->dlist, &abort_list); 231 list_add_tail(&iocb->dlist, &abort_list);
240 } 232 }
241 spin_lock_irq(&phba->hbalock); 233 if (phba->sli_rev == LPFC_SLI_REV4)
242 list_splice(&txcmplq_completions, &pring->txcmplq); 234 spin_unlock(&pring->ring_lock);
243 spin_unlock_irq(&phba->hbalock); 235 spin_unlock_irq(&phba->hbalock);
244 236
237 /* Abort the targeted IOs and remove them from the abort list. */
245 list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { 238 list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
246 spin_lock_irq(&phba->hbalock); 239 spin_lock_irq(&phba->hbalock);
247 list_del_init(&iocb->dlist); 240 list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
249 spin_unlock_irq(&phba->hbalock); 242 spin_unlock_irq(&phba->hbalock);
250 } 243 }
251 244
245 INIT_LIST_HEAD(&abort_list);
246
247 /* Now process the txq */
248 spin_lock_irq(&phba->hbalock);
249 if (phba->sli_rev == LPFC_SLI_REV4)
250 spin_lock(&pring->ring_lock);
251
252 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
253 /* Check to see if iocb matches the nport we are looking for */
254 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
255 list_del_init(&iocb->list);
256 list_add_tail(&iocb->list, &abort_list);
257 }
258 }
259
260 if (phba->sli_rev == LPFC_SLI_REV4)
261 spin_unlock(&pring->ring_lock);
262 spin_unlock_irq(&phba->hbalock);
263
252 /* Cancel all the IOCBs from the completions list */ 264 /* Cancel all the IOCBs from the completions list */
253 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 265 lpfc_sli_cancel_iocbs(phba, &abort_list,
254 IOERR_SLI_ABORTED); 266 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
255 267
256 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 268 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
257 return 0; 269 return 0;
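
[Note: lpfc_els_abort() now disposes of the two queues differently: txcmplq entries have already been handed to the adapter, so they are aborted in place with lpfc_sli_issue_abort_iotag() and complete through the normal completion path, while txq entries never started and are failed locally. Sketched below — already_issued is a hypothetical predicate, not a driver flag:]

    if (already_issued)                 /* command is on the txcmplq */
            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
    else                                /* command was still on the txq */
            lpfc_sli_cancel_iocbs(phba, &abort_list,
                                  IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
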
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05a5f0a..462453ee0bda 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
68 __be32 ref_tag; /* Target LBA or indirect LBA */ 68 __be32 ref_tag; /* Target LBA or indirect LBA */
69}; 69};
70 70
71static struct lpfc_rport_data *
72lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73{
74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75
76 if (vport->phba->cfg_EnableXLane)
77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 else
79 return (struct lpfc_rport_data *)sdev->hostdata;
80}
81
71static void 82static void
72lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 83lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
73static void 84static void
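
[Note: with OAS enabled (cfg_EnableXLane), sdev->hostdata no longer points directly at the remote-port data but at a per-LUN lpfc_device_data wrapper, so direct casts of hostdata throughout the driver are replaced by the new accessor. A usage sketch:]

    /* Callers switch from a bare hostdata cast to the helper: */
    struct lpfc_rport_data *rdata =
            lpfc_rport_data_from_scsi_device(sc->device);
    if (rdata && rdata->pnode)
            ndlp = rdata->pnode;
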
@@ -304,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
304 unsigned long new_queue_depth, old_queue_depth; 315 unsigned long new_queue_depth, old_queue_depth;
305 316
306 old_queue_depth = sdev->queue_depth; 317 old_queue_depth = sdev->queue_depth;
307 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 318
319 switch (reason) {
320 case SCSI_QDEPTH_DEFAULT:
321 /* change request from sysfs, fall through */
322 case SCSI_QDEPTH_RAMP_UP:
323 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
324 break;
325 case SCSI_QDEPTH_QFULL:
326 if (scsi_track_queue_full(sdev, qdepth) == 0)
327 return sdev->queue_depth;
328
329 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
330 "0711 detected queue full - lun queue "
331 "depth adjusted to %d.\n", sdev->queue_depth);
332 break;
333 default:
334 return -EOPNOTSUPP;
335 }
336
308 new_queue_depth = sdev->queue_depth; 337 new_queue_depth = sdev->queue_depth;
309 rdata = sdev->hostdata; 338 rdata = lpfc_rport_data_from_scsi_device(sdev);
310 if (rdata) 339 if (rdata)
311 lpfc_send_sdev_queuedepth_change_event(phba, vport, 340 lpfc_send_sdev_queuedepth_change_event(phba, vport,
312 rdata->pnode, sdev->lun, 341 rdata->pnode, sdev->lun,
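
[Note: the reworked change_queue_depth() dispatches on the midlayer's reason code: SCSI_QDEPTH_DEFAULT (a sysfs request) and SCSI_QDEPTH_RAMP_UP adjust the depth directly, while SCSI_QDEPTH_QFULL defers to scsi_track_queue_full(); any other reason is rejected with -EOPNOTSUPP. This pairs with the removal, further down in this diff, of the driver's private ramp-up machinery, since the midlayer now drives ramp-up. A sketch of the QFULL contract assumed above:]

    /* scsi_track_queue_full() is taken to return 0 when it leaves the
     * depth unchanged, in which case the current depth is reported. */
    if (reason == SCSI_QDEPTH_QFULL &&
        scsi_track_queue_full(sdev, qdepth) == 0)
            return sdev->queue_depth;
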
@@ -377,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
377} 406}
378 407
379/** 408/**
380 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
381 * @phba: The Hba for which this call is being executed.
382 *
383 * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
384 * post at most 1 event every 5 minute after last_ramp_up_time or
385 * last_rsrc_error_time. This routine wakes up worker thread of @phba
386 * to process WORKER_RAM_DOWN_EVENT event.
387 *
388 * This routine should be called with no lock held.
389 **/
390static inline void
391lpfc_rampup_queue_depth(struct lpfc_vport *vport,
392 uint32_t queue_depth)
393{
394 unsigned long flags;
395 struct lpfc_hba *phba = vport->phba;
396 uint32_t evt_posted;
397 atomic_inc(&phba->num_cmd_success);
398
399 if (vport->cfg_lun_queue_depth <= queue_depth)
400 return;
401 spin_lock_irqsave(&phba->hbalock, flags);
402 if (time_before(jiffies,
403 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
404 time_before(jiffies,
405 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
406 spin_unlock_irqrestore(&phba->hbalock, flags);
407 return;
408 }
409 phba->last_ramp_up_time = jiffies;
410 spin_unlock_irqrestore(&phba->hbalock, flags);
411
412 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
413 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
414 if (!evt_posted)
415 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
416 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
417
418 if (!evt_posted)
419 lpfc_worker_wake_up(phba);
420 return;
421}
422
423/**
424 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler 409 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
425 * @phba: The Hba for which this call is being executed. 410 * @phba: The Hba for which this call is being executed.
426 * 411 *
@@ -472,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
472} 457}
473 458
474/** 459/**
475 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
476 * @phba: The Hba for which this call is being executed.
477 *
478 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
479 * thread.This routine increases queue depth for all scsi device on each vport
480 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
481 * num_cmd_success to zero.
482 **/
483void
484lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
485{
486 struct lpfc_vport **vports;
487 struct Scsi_Host *shost;
488 struct scsi_device *sdev;
489 int i;
490
491 vports = lpfc_create_vport_work_array(phba);
492 if (vports != NULL)
493 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
494 shost = lpfc_shost_from_vport(vports[i]);
495 shost_for_each_device(sdev, shost) {
496 if (vports[i]->cfg_lun_queue_depth <=
497 sdev->queue_depth)
498 continue;
499 lpfc_change_queue_depth(sdev,
500 sdev->queue_depth+1,
501 SCSI_QDEPTH_RAMP_UP);
502 }
503 }
504 lpfc_destroy_vport_work_array(phba, vports);
505 atomic_set(&phba->num_rsrc_err, 0);
506 atomic_set(&phba->num_cmd_success, 0);
507}
508
509/**
510 * lpfc_scsi_dev_block - set all scsi hosts to block state 460 * lpfc_scsi_dev_block - set all scsi hosts to block state
511 * @phba: Pointer to HBA context object. 461 * @phba: Pointer to HBA context object.
512 * 462 *
@@ -1502,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1502 } 1452 }
1503 1453
1504 /* Next check if we need to match the remote NPortID or WWPN */ 1454 /* Next check if we need to match the remote NPortID or WWPN */
1505 rdata = sc->device->hostdata; 1455 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1506 if (rdata && rdata->pnode) { 1456 if (rdata && rdata->pnode) {
1507 ndlp = rdata->pnode; 1457 ndlp = rdata->pnode;
1508 1458
@@ -3507,6 +3457,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3507 * we need to set word 4 of IOCB here 3457 * we need to set word 4 of IOCB here
3508 */ 3458 */
3509 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 3459 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3460
3461 /*
3462 * If the OAS driver feature is enabled and the lun is enabled for
 3463 * OAS, set the OAS-related iocb flags.
3464 */
3465 if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
3466 scsi_cmnd->device->hostdata)->oas_enabled)
3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
3510 return 0; 3468 return 0;
3511} 3469}
3512 3470
@@ -4021,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4021 struct lpfc_nodelist *pnode = rdata->pnode; 3979 struct lpfc_nodelist *pnode = rdata->pnode;
4022 struct scsi_cmnd *cmd; 3980 struct scsi_cmnd *cmd;
4023 int result; 3981 int result;
4024 struct scsi_device *tmp_sdev;
4025 int depth; 3982 int depth;
4026 unsigned long flags; 3983 unsigned long flags;
4027 struct lpfc_fast_path_event *fast_path_evt; 3984 struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4266 return; 4223 return;
4267 } 4224 }
4268 4225
4269 if (!result)
4270 lpfc_rampup_queue_depth(vport, queue_depth);
4271
4272 /*
4273 * Check for queue full. If the lun is reporting queue full, then
4274 * back off the lun queue depth to prevent target overloads.
4275 */
4276 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
4277 NLP_CHK_NODE_ACT(pnode)) {
4278 shost_for_each_device(tmp_sdev, shost) {
4279 if (tmp_sdev->id != scsi_id)
4280 continue;
4281 depth = scsi_track_queue_full(tmp_sdev,
4282 tmp_sdev->queue_depth-1);
4283 if (depth <= 0)
4284 continue;
4285 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4286 "0711 detected queue full - lun queue "
4287 "depth adjusted to %d.\n", depth);
4288 lpfc_send_sdev_queuedepth_change_event(phba, vport,
4289 pnode,
4290 tmp_sdev->lun,
4291 depth+1, depth);
4292 }
4293 }
4294
4295 spin_lock_irqsave(&phba->hbalock, flags); 4226 spin_lock_irqsave(&phba->hbalock, flags);
4296 lpfc_cmd->pCmd = NULL; 4227 lpfc_cmd->pCmd = NULL;
4297 spin_unlock_irqrestore(&phba->hbalock, flags); 4228 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4492 } 4423 }
4493 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4424 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4494 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4425 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4426 piocb->ulpPU = 0;
4427 piocb->un.fcpi.fcpi_parm = 0;
4495 4428
4496 /* ulpTimeout is only one byte */ 4429 /* ulpTimeout is only one byte */
4497 if (lpfc_cmd->timeout > 0xff) { 4430 if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4691{ 4624{
4692 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4625 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4693 struct lpfc_hba *phba = vport->phba; 4626 struct lpfc_hba *phba = vport->phba;
4694 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 4627 struct lpfc_rport_data *rdata;
4695 struct lpfc_nodelist *ndlp; 4628 struct lpfc_nodelist *ndlp;
4696 struct lpfc_scsi_buf *lpfc_cmd; 4629 struct lpfc_scsi_buf *lpfc_cmd;
4697 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 4630 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4698 int err; 4631 int err;
4699 4632
4633 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4700 err = fc_remote_port_chkready(rport); 4634 err = fc_remote_port_chkready(rport);
4701 if (err) { 4635 if (err) {
4702 cmnd->result = err; 4636 cmnd->result = err;
@@ -4782,6 +4716,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4782 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 4716 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4783 if (err) { 4717 if (err) {
4784 atomic_dec(&ndlp->cmd_pending); 4718 atomic_dec(&ndlp->cmd_pending);
4719 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4720 "3376 FCP could not issue IOCB err %x"
4721 "FCP cmd x%x <%d/%d> "
4722 "sid: x%x did: x%x oxid: x%x "
4723 "Data: x%x x%x x%x x%x\n",
4724 err, cmnd->cmnd[0],
4725 cmnd->device ? cmnd->device->id : 0xffff,
4726 cmnd->device ? cmnd->device->lun : 0xffff,
4727 vport->fc_myDID, ndlp->nlp_DID,
4728 phba->sli_rev == LPFC_SLI_REV4 ?
4729 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4730 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4731 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4732 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4733 (uint32_t)
4734 (cmnd->request->timeout / 1000));
4735
4736
4785 goto out_host_busy_free_buf; 4737 goto out_host_busy_free_buf;
4786 } 4738 }
4787 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 4739 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5161static int 5113static int
5162lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 5114lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5163{ 5115{
5164 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5116 struct lpfc_rport_data *rdata;
5165 struct lpfc_nodelist *pnode; 5117 struct lpfc_nodelist *pnode;
5166 unsigned long later; 5118 unsigned long later;
5167 5119
5120 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5168 if (!rdata) { 5121 if (!rdata) {
5169 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5122 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5170 "0797 Tgt Map rport failure: rdata x%p\n", rdata); 5123 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5182 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 5135 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5183 return SUCCESS; 5136 return SUCCESS;
5184 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 5137 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5185 rdata = cmnd->device->hostdata; 5138 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5186 if (!rdata) 5139 if (!rdata)
5187 return FAILED; 5140 return FAILED;
5188 pnode = rdata->pnode; 5141 pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5254{ 5207{
5255 struct Scsi_Host *shost = cmnd->device->host; 5208 struct Scsi_Host *shost = cmnd->device->host;
5256 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5209 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5257 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5210 struct lpfc_rport_data *rdata;
5258 struct lpfc_nodelist *pnode; 5211 struct lpfc_nodelist *pnode;
5259 unsigned tgt_id = cmnd->device->id; 5212 unsigned tgt_id = cmnd->device->id;
5260 unsigned int lun_id = cmnd->device->lun; 5213 unsigned int lun_id = cmnd->device->lun;
5261 struct lpfc_scsi_event_header scsi_event; 5214 struct lpfc_scsi_event_header scsi_event;
5262 int status; 5215 int status;
5263 5216
5217 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5264 if (!rdata) { 5218 if (!rdata) {
5265 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5219 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5266 "0798 Device Reset rport failure: rdata x%p\n", rdata); 5220 "0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5323{ 5277{
5324 struct Scsi_Host *shost = cmnd->device->host; 5278 struct Scsi_Host *shost = cmnd->device->host;
5325 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5279 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5326 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 5280 struct lpfc_rport_data *rdata;
5327 struct lpfc_nodelist *pnode; 5281 struct lpfc_nodelist *pnode;
5328 unsigned tgt_id = cmnd->device->id; 5282 unsigned tgt_id = cmnd->device->id;
5329 unsigned int lun_id = cmnd->device->lun; 5283 unsigned int lun_id = cmnd->device->lun;
5330 struct lpfc_scsi_event_header scsi_event; 5284 struct lpfc_scsi_event_header scsi_event;
5331 int status; 5285 int status;
5332 5286
5287 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5333 if (!rdata) { 5288 if (!rdata) {
5334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5289 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5335 "0799 Target Reset rport failure: rdata x%p\n", rdata); 5290 "0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5529 uint32_t num_to_alloc = 0; 5484 uint32_t num_to_alloc = 0;
5530 int num_allocated = 0; 5485 int num_allocated = 0;
5531 uint32_t sdev_cnt; 5486 uint32_t sdev_cnt;
5487 struct lpfc_device_data *device_data;
5488 unsigned long flags;
5489 struct lpfc_name target_wwpn;
5532 5490
5533 if (!rport || fc_remote_port_chkready(rport)) 5491 if (!rport || fc_remote_port_chkready(rport))
5534 return -ENXIO; 5492 return -ENXIO;
5535 5493
5536 sdev->hostdata = rport->dd_data; 5494 if (phba->cfg_EnableXLane) {
5495
5496 /*
5497 * Check to see if the device data structure for the lun
5498 * exists. If not, create one.
5499 */
5500
5501 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5502 spin_lock_irqsave(&phba->devicelock, flags);
5503 device_data = __lpfc_get_device_data(phba,
5504 &phba->luns,
5505 &vport->fc_portname,
5506 &target_wwpn,
5507 sdev->lun);
5508 if (!device_data) {
5509 spin_unlock_irqrestore(&phba->devicelock, flags);
5510 device_data = lpfc_create_device_data(phba,
5511 &vport->fc_portname,
5512 &target_wwpn,
5513 sdev->lun, true);
5514 if (!device_data)
5515 return -ENOMEM;
5516 spin_lock_irqsave(&phba->devicelock, flags);
5517 list_add_tail(&device_data->listentry, &phba->luns);
5518 }
5519 device_data->rport_data = rport->dd_data;
5520 device_data->available = true;
5521 spin_unlock_irqrestore(&phba->devicelock, flags);
5522 sdev->hostdata = device_data;
5523 } else {
5524 sdev->hostdata = rport->dd_data;
5525 }
5537 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 5526 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5538 5527
5539 /* 5528 /*
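lpfc_slave_alloc() now resolves the (vport wwpn, target wwpn, lun) triple to a per-lun lpfc_device_data. The lookup-or-create skeleton, with the bookkeeping fields trimmed:

	/* search under devicelock; drop it to allocate; retake to insert */
	spin_lock_irqsave(&phba->devicelock, flags);
	device_data = __lpfc_get_device_data(phba, &phba->luns,
					     &vport->fc_portname,
					     &target_wwpn, sdev->lun);
	if (!device_data) {
		spin_unlock_irqrestore(&phba->devicelock, flags);
		device_data = lpfc_create_device_data(phba,
						      &vport->fc_portname,
						      &target_wwpn, sdev->lun,
						      true);
		if (!device_data)
			return -ENOMEM;
		spin_lock_irqsave(&phba->devicelock, flags);
		list_add_tail(&device_data->listentry, &phba->luns);
	}
	sdev->hostdata = device_data;
	spin_unlock_irqrestore(&phba->devicelock, flags);

Since the lock is dropped around the allocation, passing false (GFP_KERNEL) would presumably also have been safe here; the patch opts for GFP_ATOMIC via true.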
@@ -5623,11 +5612,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
5623{ 5612{
5624 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 5613 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5625 struct lpfc_hba *phba = vport->phba; 5614 struct lpfc_hba *phba = vport->phba;
5615 unsigned long flags;
5616 struct lpfc_device_data *device_data = sdev->hostdata;
5617
5626 atomic_dec(&phba->sdev_cnt); 5618 atomic_dec(&phba->sdev_cnt);
5619 if ((phba->cfg_EnableXLane) && (device_data)) {
5620 spin_lock_irqsave(&phba->devicelock, flags);
5621 device_data->available = false;
5622 if (!device_data->oas_enabled)
5623 lpfc_delete_device_data(phba, device_data);
5624 spin_unlock_irqrestore(&phba->devicelock, flags);
5625 }
5627 sdev->hostdata = NULL; 5626 sdev->hostdata = NULL;
5628 return; 5627 return;
5629} 5628}
5630 5629
5630/**
5631 * lpfc_create_device_data - creates and initializes device data structure for OAS
 5632 * @phba: Pointer to host bus adapter structure.
5633 * @vport_wwpn: Pointer to vport's wwpn information
5634 * @target_wwpn: Pointer to target's wwpn information
5635 * @lun: Lun on target
5636 * @atomic_create: Flag to indicate if memory should be allocated using the
5637 * GFP_ATOMIC flag or not.
5638 *
5639 * This routine creates a device data structure which will contain identifying
5640 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 5641 * whether or not the corresponding lun is available to the system,
 5642 * and a pointer to the rport data.
5643 *
5644 * Return codes:
5645 * NULL - Error
5646 * Pointer to lpfc_device_data - Success
5647 **/
5648struct lpfc_device_data*
5649lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5650 struct lpfc_name *target_wwpn, uint64_t lun,
5651 bool atomic_create)
5652{
5653
5654 struct lpfc_device_data *lun_info;
5655 int memory_flags;
5656
5657 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5658 !(phba->cfg_EnableXLane))
5659 return NULL;
5660
5661 /* Attempt to create the device data to contain lun info */
5662
5663 if (atomic_create)
5664 memory_flags = GFP_ATOMIC;
5665 else
5666 memory_flags = GFP_KERNEL;
5667 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5668 if (!lun_info)
5669 return NULL;
5670 INIT_LIST_HEAD(&lun_info->listentry);
5671 lun_info->rport_data = NULL;
5672 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5673 sizeof(struct lpfc_name));
5674 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5675 sizeof(struct lpfc_name));
5676 lun_info->device_id.lun = lun;
5677 lun_info->oas_enabled = false;
5678 lun_info->available = false;
5679 return lun_info;
5680}
5681
5682/**
5683 * lpfc_delete_device_data - frees a device data structure for OAS
 5684 * @phba: Pointer to host bus adapter structure.
5685 * @lun_info: Pointer to device data structure to free.
5686 *
 5687 * This routine frees the previously allocated device data structure passed in.
5688 *
5689 **/
5690void
5691lpfc_delete_device_data(struct lpfc_hba *phba,
5692 struct lpfc_device_data *lun_info)
5693{
5694
5695 if (unlikely(!phba) || !lun_info ||
5696 !(phba->cfg_EnableXLane))
5697 return;
5698
5699 if (!list_empty(&lun_info->listentry))
5700 list_del(&lun_info->listentry);
5701 mempool_free(lun_info, phba->device_data_mem_pool);
5702 return;
5703}
5704
5705/**
5706 * __lpfc_get_device_data - returns the device data for the specified lun
 5707 * @phba: Pointer to host bus adapter structure.
 5708 * @list: Pointer to the list to search.
5709 * @vport_wwpn: Pointer to vport's wwpn information
5710 * @target_wwpn: Pointer to target's wwpn information
5711 * @lun: Lun on target
5712 *
5713 * This routine searches the list passed for the specified lun's device data.
 5714 * This function does not take any locks; it is the caller's responsibility
 5715 * to ensure the proper lock is held before calling the function.
5716 *
5717 * Return codes:
5718 * NULL - Error
5719 * Pointer to lpfc_device_data - Success
5720 **/
5721struct lpfc_device_data*
5722__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5723 struct lpfc_name *vport_wwpn,
5724 struct lpfc_name *target_wwpn, uint64_t lun)
5725{
5726
5727 struct lpfc_device_data *lun_info;
5728
5729 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5730 !phba->cfg_EnableXLane)
5731 return NULL;
5732
 5733 /* Search the list for a matching vport/target/lun triple. */
5734
5735 list_for_each_entry(lun_info, list, listentry) {
5736 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5737 sizeof(struct lpfc_name)) == 0) &&
5738 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5739 sizeof(struct lpfc_name)) == 0) &&
5740 (lun_info->device_id.lun == lun))
5741 return lun_info;
5742 }
5743
5744 return NULL;
5745}
5746
5747/**
5748 * lpfc_find_next_oas_lun - searches for the next oas lun
 5749 * @phba: Pointer to host bus adapter structure.
5750 * @vport_wwpn: Pointer to vport's wwpn information
5751 * @target_wwpn: Pointer to target's wwpn information
5752 * @starting_lun: Pointer to the lun to start searching for
5753 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5754 * @found_target_wwpn: Pointer to the found lun's target wwpn information
5755 * @found_lun: Pointer to the found lun.
5756 * @found_lun_status: Pointer to status of the found lun.
5757 *
5758 * This routine searches the luns list for the specified lun
5759 * or the first lun for the vport/target. If the vport wwpn contains
5760 * a zero value then a specific vport is not specified. In this case
5761 * any vport which contains the lun will be considered a match. If the
5762 * target wwpn contains a zero value then a specific target is not specified.
5763 * In this case any target which contains the lun will be considered a
5764 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
5765 * are returned. The function will also return the next lun if available.
 5766 * If the next lun is not found, the starting_lun parameter will be set to
5767 * NO_MORE_OAS_LUN.
5768 *
5769 * Return codes:
 5770 * false - lun not found or invalid arguments
 5771 * true - lun found
5772 **/
5773bool
5774lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5775 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5776 struct lpfc_name *found_vport_wwpn,
5777 struct lpfc_name *found_target_wwpn,
5778 uint64_t *found_lun,
5779 uint32_t *found_lun_status)
5780{
5781
5782 unsigned long flags;
5783 struct lpfc_device_data *lun_info;
5784 struct lpfc_device_id *device_id;
5785 uint64_t lun;
5786 bool found = false;
5787
5788 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5789 !starting_lun || !found_vport_wwpn ||
5790 !found_target_wwpn || !found_lun || !found_lun_status ||
5791 (*starting_lun == NO_MORE_OAS_LUN) ||
5792 !phba->cfg_EnableXLane)
5793 return false;
5794
5795 lun = *starting_lun;
5796 *found_lun = NO_MORE_OAS_LUN;
5797 *starting_lun = NO_MORE_OAS_LUN;
5798
 5799 /* Search for the lun, or the lun closest in value */
5800
5801 spin_lock_irqsave(&phba->devicelock, flags);
5802 list_for_each_entry(lun_info, &phba->luns, listentry) {
5803 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5804 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5805 sizeof(struct lpfc_name)) == 0)) &&
5806 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5807 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5808 sizeof(struct lpfc_name)) == 0)) &&
5809 (lun_info->oas_enabled)) {
5810 device_id = &lun_info->device_id;
5811 if ((!found) &&
5812 ((lun == FIND_FIRST_OAS_LUN) ||
5813 (device_id->lun == lun))) {
5814 *found_lun = device_id->lun;
5815 memcpy(found_vport_wwpn,
5816 &device_id->vport_wwpn,
5817 sizeof(struct lpfc_name));
5818 memcpy(found_target_wwpn,
5819 &device_id->target_wwpn,
5820 sizeof(struct lpfc_name));
5821 if (lun_info->available)
5822 *found_lun_status =
5823 OAS_LUN_STATUS_EXISTS;
5824 else
5825 *found_lun_status = 0;
5826 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5827 memset(vport_wwpn, 0x0,
5828 sizeof(struct lpfc_name));
5829 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5830 memset(target_wwpn, 0x0,
5831 sizeof(struct lpfc_name));
5832 found = true;
5833 } else if (found) {
5834 *starting_lun = device_id->lun;
5835 memcpy(vport_wwpn, &device_id->vport_wwpn,
5836 sizeof(struct lpfc_name));
5837 memcpy(target_wwpn, &device_id->target_wwpn,
5838 sizeof(struct lpfc_name));
5839 break;
5840 }
5841 }
5842 }
5843 spin_unlock_irqrestore(&phba->devicelock, flags);
5844 return found;
5845}
5846
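The cursor semantics described in the comment block are easiest to see from a caller. A sketch that walks every OAS lun, assuming phba->cfg_oas_flags carries OAS_FIND_ANY_VPORT and OAS_FIND_ANY_TARGET so that zeroed wwpns stay wildcards across iterations:

	struct lpfc_name vport_wwpn = { 0 }, target_wwpn = { 0 };
	struct lpfc_name found_vport, found_target;
	uint64_t cursor = FIND_FIRST_OAS_LUN;
	uint64_t found_lun;
	uint32_t lun_status;

	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
				      &cursor, &found_vport, &found_target,
				      &found_lun, &lun_status)) {
		/*
		 * found_lun/found_vport/found_target identify one OAS lun;
		 * lun_status reports whether it is currently visible
		 * (OAS_LUN_STATUS_EXISTS).  cursor now holds the next lun,
		 * or NO_MORE_OAS_LUN, which ends the loop on the next call.
		 */
	}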
5847/**
5848 * lpfc_enable_oas_lun - enables a lun for OAS operations
 5849 * @phba: Pointer to host bus adapter structure.
5850 * @vport_wwpn: Pointer to vport's wwpn information
5851 * @target_wwpn: Pointer to target's wwpn information
5852 * @lun: Lun
5853 *
 5854 * This routine enables a lun for OAS operations. The routine does so as
 5855 * follows:
5856 *
5857 * 1) Checks to see if the device data for the lun has been created.
5858 * 2) If found, sets the OAS enabled flag if not set and returns.
5859 * 3) Otherwise, creates a device data structure.
5860 * 4) If successfully created, indicates the device data is for an OAS lun,
 5861 * marks the lun as not available, and adds it to the list of luns.
5862 *
5863 * Return codes:
5864 * false - Error
5865 * true - Success
5866 **/
5867bool
5868lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5869 struct lpfc_name *target_wwpn, uint64_t lun)
5870{
5871
5872 struct lpfc_device_data *lun_info;
5873 unsigned long flags;
5874
5875 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5876 !phba->cfg_EnableXLane)
5877 return false;
5878
5879 spin_lock_irqsave(&phba->devicelock, flags);
5880
5881 /* Check to see if the device data for the lun has been created */
5882 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5883 target_wwpn, lun);
5884 if (lun_info) {
5885 if (!lun_info->oas_enabled)
5886 lun_info->oas_enabled = true;
5887 spin_unlock_irqrestore(&phba->devicelock, flags);
5888 return true;
5889 }
5890
 5891 /* Create a lun info structure and add it to the list of luns */
5892 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5893 false);
5894 if (lun_info) {
5895 lun_info->oas_enabled = true;
5896 lun_info->available = false;
5897 list_add_tail(&lun_info->listentry, &phba->luns);
5898 spin_unlock_irqrestore(&phba->devicelock, flags);
5899 return true;
5900 }
5901 spin_unlock_irqrestore(&phba->devicelock, flags);
5902 return false;
5903}
5904
5905/**
5906 * lpfc_disable_oas_lun - disables a lun for OAS operations
 5907 * @phba: Pointer to host bus adapter structure.
5908 * @vport_wwpn: Pointer to vport's wwpn information
5909 * @target_wwpn: Pointer to target's wwpn information
5910 * @lun: Lun
5911 *
 5912 * This routine disables a lun for OAS operations. The routine does so as
 5913 * follows:
5914 *
5915 * 1) Checks to see if the device data for the lun is created.
5916 * 2) If present, clears the flag indicating this lun is for OAS.
 5917 * 3) If the lun is not available to the system, the device data is
5918 * freed.
5919 *
5920 * Return codes:
5921 * false - Error
5922 * true - Success
5923 **/
5924bool
5925lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5926 struct lpfc_name *target_wwpn, uint64_t lun)
5927{
5928
5929 struct lpfc_device_data *lun_info;
5930 unsigned long flags;
5931
5932 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5933 !phba->cfg_EnableXLane)
5934 return false;
5935
5936 spin_lock_irqsave(&phba->devicelock, flags);
5937
 5938 /* Check to see if device data exists for the lun. */
5939 lun_info = __lpfc_get_device_data(phba,
5940 &phba->luns, vport_wwpn,
5941 target_wwpn, lun);
5942 if (lun_info) {
5943 lun_info->oas_enabled = false;
5944 if (!lun_info->available)
5945 lpfc_delete_device_data(phba, lun_info);
5946 spin_unlock_irqrestore(&phba->devicelock, flags);
5947 return true;
5948 }
5949
5950 spin_unlock_irqrestore(&phba->devicelock, flags);
5951 return false;
5952}
5631 5953
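Taken together, enable/disable bracket a lun's lifetime in the OAS list. A usage sketch, as a fragment: the surrounding function, the wwpn values, and the chosen error codes are assumptions, not part of the patch:

	if (!lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun))
		return -EPERM;	/* feature off, bad args, or alloc failure */

	/*
	 * I/O issued to this lun is now tagged LPFC_IO_OAS by
	 * lpfc_scsi_prep_dma_buf_s4() and routed to the OAS work queue.
	 */

	if (!lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun))
		return -ENXIO;	/* lun was never tracked */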
5632struct scsi_host_template lpfc_template = { 5954struct scsi_host_template lpfc_template = {
5633 .module = THIS_MODULE, 5955 .module = THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7def493..0120bfccf50b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
41 struct lpfc_nodelist *pnode; /* Pointer to the node structure. */ 41 struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
42}; 42};
43 43
44struct lpfc_device_id {
45 struct lpfc_name vport_wwpn;
46 struct lpfc_name target_wwpn;
47 uint64_t lun;
48};
49
50struct lpfc_device_data {
51 struct list_head listentry;
52 struct lpfc_rport_data *rport_data;
53 struct lpfc_device_id device_id;
54 bool oas_enabled;
55 bool available;
56};
57
44struct fcp_rsp { 58struct fcp_rsp {
45 uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */ 59 uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
46 uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */ 60 uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
166#define LPFC_SCSI_DMA_EXT_SIZE 264 180#define LPFC_SCSI_DMA_EXT_SIZE 264
167#define LPFC_BPL_SIZE 1024 181#define LPFC_BPL_SIZE 1024
168#define MDAC_DIRECT_CMD 0x22 182#define MDAC_DIRECT_CMD 0x22
183
184#define FIND_FIRST_OAS_LUN 0
185#define NO_MORE_OAS_LUN -1
186#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fda443f..6bb51f8e3c1b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
635 if (!ndlp) 635 if (!ndlp)
636 goto out; 636 goto out;
637 637
638 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { 638 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
639 rrq->send_rrq = 0; 639 rrq->send_rrq = 0;
640 rrq->xritag = 0; 640 rrq->xritag = 0;
641 rrq->rrq_stop_time = 0; 641 rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
678 next_time = rrq->rrq_stop_time; 678 next_time = rrq->rrq_stop_time;
679 } 679 }
680 spin_unlock_irqrestore(&phba->hbalock, iflags); 680 spin_unlock_irqrestore(&phba->hbalock, iflags);
681 if (!list_empty(&phba->active_rrq_list)) 681 if ((!list_empty(&phba->active_rrq_list)) &&
682 (!(phba->pport->load_flag & FC_UNLOADING)))
682 mod_timer(&phba->rrq_tmr, next_time); 683 mod_timer(&phba->rrq_tmr, next_time);
683 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 684 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
684 list_del(&rrq->list); 685 list_del(&rrq->list);
@@ -792,7 +793,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
792 list_del(&rrq->list); 793 list_del(&rrq->list);
793 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 794 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
794 } 795 }
795 if (!list_empty(&phba->active_rrq_list)) 796 if ((!list_empty(&phba->active_rrq_list)) &&
797 (!(phba->pport->load_flag & FC_UNLOADING)))
798
796 mod_timer(&phba->rrq_tmr, next_time); 799 mod_timer(&phba->rrq_tmr, next_time);
797} 800}
798 801
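Both rrq hunks above add the same guard, distilled below: once the port sets FC_UNLOADING, re-arming rrq_tmr would risk the timer firing against state that teardown is about to free.

	if (!list_empty(&phba->active_rrq_list) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->rrq_tmr, next_time);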
@@ -813,7 +816,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
813{ 816{
814 if (!ndlp) 817 if (!ndlp)
815 return 0; 818 return 0;
816 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 819 if (!ndlp->active_rrqs_xri_bitmap)
820 return 0;
821 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
817 return 1; 822 return 1;
818 else 823 else
819 return 0; 824 return 0;
@@ -863,7 +868,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
863 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 868 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
864 goto out; 869 goto out;
865 870
866 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 871 if (!ndlp->active_rrqs_xri_bitmap)
872 goto out;
873
874 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
867 goto out; 875 goto out;
868 876
869 spin_unlock_irqrestore(&phba->hbalock, iflags); 877 spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1318 1326
1319 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1327 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1320 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1328 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1321 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1329 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
1330 (!(piocb->vport->load_flag & FC_UNLOADING))) {
1322 if (!piocb->vport) 1331 if (!piocb->vport)
1323 BUG(); 1332 BUG();
1324 else 1333 else
@@ -4971,12 +4980,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4971 LPFC_QUEUE_REARM); 4980 LPFC_QUEUE_REARM);
4972 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 4981 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4973 } 4982 }
4983
4984 if (phba->cfg_EnableXLane)
4985 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
4986
4974 if (phba->sli4_hba.hba_eq) { 4987 if (phba->sli4_hba.hba_eq) {
4975 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 4988 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4976 fcp_eqidx++) 4989 fcp_eqidx++)
4977 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 4990 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4978 LPFC_QUEUE_REARM); 4991 LPFC_QUEUE_REARM);
4979 } 4992 }
4993
4994 if (phba->cfg_fof)
4995 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
4980} 4996}
4981 4997
4982/** 4998/**
@@ -8032,7 +8048,8 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
8032 struct lpfc_vector_map_info *cpup; 8048 struct lpfc_vector_map_info *cpup;
8033 int chann, cpu; 8049 int chann, cpu;
8034 8050
8035 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { 8051 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
8052 && phba->cfg_fcp_io_channel > 1) {
8036 cpu = smp_processor_id(); 8053 cpu = smp_processor_id();
8037 if (cpu < phba->sli4_hba.num_present_cpu) { 8054 if (cpu < phba->sli4_hba.num_present_cpu) {
8038 cpup = phba->sli4_hba.cpu_map; 8055 cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8250 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8267 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8251 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8268 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8252 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8269 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8270 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8271 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8272 if (phba->cfg_XLanePriority) {
8273 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8274 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8275 (phba->cfg_XLanePriority << 1));
8276 }
8277 }
8253 break; 8278 break;
8254 case CMD_FCP_IREAD64_CR: 8279 case CMD_FCP_IREAD64_CR:
8255 /* word3 iocb=iotag wqe=payload_offset_len */ 8280 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8271 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8296 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8272 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8297 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8273 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8298 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8299 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8300 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8301 if (phba->cfg_XLanePriority) {
8302 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8303 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8304 (phba->cfg_XLanePriority << 1));
8305 }
8306 }
8274 break; 8307 break;
8275 case CMD_FCP_ICMND64_CR: 8308 case CMD_FCP_ICMND64_CR:
8276 /* word3 iocb=iotag wqe=payload_offset_len */ 8309 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8291 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8324 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8292 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8325 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8293 iocbq->iocb.ulpFCP2Rcvy); 8326 iocbq->iocb.ulpFCP2Rcvy);
8327 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8328 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8329 if (phba->cfg_XLanePriority) {
8330 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8331 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8332 (phba->cfg_XLanePriority << 1));
8333 }
8334 }
8294 break; 8335 break;
8295 case CMD_GEN_REQUEST64_CR: 8336 case CMD_GEN_REQUEST64_CR:
8296 /* For this command calculate the xmit length of the 8337 /* For this command calculate the xmit length of the
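The same OAS tagging block is repeated for the FCP write, read, and command WQE cases above; its common shape, with iwrite as the representative (iread/icmd differ only in the wqe union member):

	if (iocbq->iocb_flag & LPFC_IO_OAS) {
		bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
		if (phba->cfg_XLanePriority) {
			/* CCPE enables the CCP field; the priority value
			 * is shifted up past bit 0.
			 */
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       phba->cfg_XLanePriority << 1);
		}
	}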
@@ -8523,6 +8564,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8523{ 8564{
8524 struct lpfc_sglq *sglq; 8565 struct lpfc_sglq *sglq;
8525 union lpfc_wqe wqe; 8566 union lpfc_wqe wqe;
8567 struct lpfc_queue *wq;
8526 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8568 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8527 8569
8528 if (piocb->sli4_xritag == NO_XRI) { 8570 if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8575 return IOCB_ERROR; 8617 return IOCB_ERROR;
8576 8618
8577 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8619 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8578 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8620 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8579 if (unlikely(!phba->sli4_hba.fcp_wq)) 8621 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
8580 return IOCB_ERROR; 8622 LPFC_IO_OAS))) {
8581 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8623 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8582 &wqe)) 8624 } else {
8625 wq = phba->sli4_hba.oas_wq;
8626 }
8627 if (lpfc_sli4_wq_put(wq, &wqe))
8583 return IOCB_ERROR; 8628 return IOCB_ERROR;
8584 } else { 8629 } else {
8585 if (unlikely(!phba->sli4_hba.els_wq)) 8630 if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8669 8714
8670 if (phba->sli_rev == LPFC_SLI_REV4) { 8715 if (phba->sli_rev == LPFC_SLI_REV4) {
8671 if (piocb->iocb_flag & LPFC_IO_FCP) { 8716 if (piocb->iocb_flag & LPFC_IO_FCP) {
8672 if (unlikely(!phba->sli4_hba.fcp_wq)) 8717 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
8673 return IOCB_ERROR; 8718 LPFC_IO_OAS))) {
8674 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8719 if (unlikely(!phba->sli4_hba.fcp_wq))
8675 piocb->fcp_wqidx = idx; 8720 return IOCB_ERROR;
8676 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8721 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8677 8722 piocb->fcp_wqidx = idx;
8723 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8724 } else {
8725 if (unlikely(!phba->sli4_hba.oas_wq))
8726 return IOCB_ERROR;
8727 idx = 0;
8728 piocb->fcp_wqidx = 0;
8729 ring_number = LPFC_FCP_OAS_RING;
8730 }
8678 pring = &phba->sli.ring[ring_number]; 8731 pring = &phba->sli.ring[ring_number];
8679 spin_lock_irqsave(&pring->ring_lock, iflags); 8732 spin_lock_irqsave(&pring->ring_lock, iflags);
8680 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8733 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12132 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12185 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12133} 12186}
12134 12187
12188
12189/**
12190 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
12191 * entry
12192 * @phba: Pointer to HBA context object.
12193 * @eqe: Pointer to fast-path event queue entry.
12194 *
 12195 * This routine processes an event queue entry from the Flash Optimized Fabric
 12196 * event queue. It checks the MajorCode and MinorCode to determine whether this
 12197 * is a completion event on a completion queue; if not, an error is logged and
 12198 * the routine returns. Otherwise, it finds the corresponding completion queue,
 12199 * processes all the entries on that completion queue, rearms the completion
 12200 * queue, and then returns.
12201 **/
12202static void
12203lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12204{
12205 struct lpfc_queue *cq;
12206 struct lpfc_cqe *cqe;
12207 bool workposted = false;
12208 uint16_t cqid;
12209 int ecount = 0;
12210
12211 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12212 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12213 "9147 Not a valid completion "
12214 "event: majorcode=x%x, minorcode=x%x\n",
12215 bf_get_le32(lpfc_eqe_major_code, eqe),
12216 bf_get_le32(lpfc_eqe_minor_code, eqe));
12217 return;
12218 }
12219
12220 /* Get the reference to the corresponding CQ */
12221 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12222
12223 /* Next check for OAS */
12224 cq = phba->sli4_hba.oas_cq;
12225 if (unlikely(!cq)) {
12226 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12227 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12228 "9148 OAS completion queue "
12229 "does not exist\n");
12230 return;
12231 }
12232
12233 if (unlikely(cqid != cq->queue_id)) {
12234 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12235 "9149 Miss-matched fast-path compl "
12236 "queue id: eqcqid=%d, fcpcqid=%d\n",
12237 cqid, cq->queue_id);
12238 return;
12239 }
12240
12241 /* Process all the entries to the OAS CQ */
12242 while ((cqe = lpfc_sli4_cq_get(cq))) {
12243 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12244 if (!(++ecount % cq->entry_repost))
12245 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12246 }
12247
12248 /* Track the max number of CQEs processed in 1 EQ */
12249 if (ecount > cq->CQ_max_cqe)
12250 cq->CQ_max_cqe = ecount;
12251
12252 /* Catch the no cq entry condition */
12253 if (unlikely(ecount == 0))
12254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12255 "9153 No entry from fast-path completion "
12256 "queue fcpcqid=%d\n", cq->queue_id);
12257
 12258 /* In any case, flush and re-arm the CQ */
12259 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12260
12261 /* wake up worker thread if there are works to be done */
12262 if (workposted)
12263 lpfc_worker_wake_up(phba);
12264}
12265
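The handler's drain loop follows the driver's standard CQ idiom, stripped to its core below; handle_one_cqe() is a hypothetical stand-in for lpfc_sli4_fp_handle_wcqe():

	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;

	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= handle_one_cqe(phba, cq, cqe);
		/* release a batch without re-arming to keep the CQ moving */
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	/* the final release re-arms the CQ so the next completion interrupts */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
	if (workposted)
		lpfc_worker_wake_up(phba);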
12266/**
12267 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
12268 * @irq: Interrupt number.
12269 * @dev_id: The device context pointer.
12270 *
12271 * This function is directly called from the PCI layer as an interrupt
12272 * service routine when device with SLI-4 interface spec is enabled with
12273 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
12274 * IOCB ring event in the HBA. However, when the device is enabled with either
12275 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12276 * device-level interrupt handler. When the PCI slot is in error recovery
12277 * or the HBA is undergoing initialization, the interrupt handler will not
 12278 * process the interrupt. Flash Optimized Fabric ring events are handled in
 12279 * interrupt context. This function is called without any lock held.
 12280 * It gets the hbalock to access and update SLI data structures. Note that
 12281 * the EQ and CQ are mapped one-to-one, such that the EQ index is
 12282 * equal to the CQ index.
12283 *
12284 * This function returns IRQ_HANDLED when interrupt is handled else it
12285 * returns IRQ_NONE.
12286 **/
12287irqreturn_t
12288lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
12289{
12290 struct lpfc_hba *phba;
12291 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12292 struct lpfc_queue *eq;
12293 struct lpfc_eqe *eqe;
12294 unsigned long iflag;
12295 int ecount = 0;
12296 uint32_t eqidx;
12297
12298 /* Get the driver's phba structure from the dev_id */
12299 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12300 phba = fcp_eq_hdl->phba;
12301 eqidx = fcp_eq_hdl->idx;
12302
12303 if (unlikely(!phba))
12304 return IRQ_NONE;
12305
12306 /* Get to the EQ struct associated with this vector */
12307 eq = phba->sli4_hba.fof_eq;
12308 if (unlikely(!eq))
12309 return IRQ_NONE;
12310
12311 /* Check device state for handling interrupt */
12312 if (unlikely(lpfc_intr_state_check(phba))) {
12313 eq->EQ_badstate++;
12314 /* Check again for link_state with lock held */
12315 spin_lock_irqsave(&phba->hbalock, iflag);
12316 if (phba->link_state < LPFC_LINK_DOWN)
12317 /* Flush, clear interrupt, and rearm the EQ */
12318 lpfc_sli4_eq_flush(phba, eq);
12319 spin_unlock_irqrestore(&phba->hbalock, iflag);
12320 return IRQ_NONE;
12321 }
12322
12323 /*
 12324 * Process all the events on the Flash Optimized Fabric EQ
12325 */
12326 while ((eqe = lpfc_sli4_eq_get(eq))) {
12327 lpfc_sli4_fof_handle_eqe(phba, eqe);
12328 if (!(++ecount % eq->entry_repost))
12329 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
12330 eq->EQ_processed++;
12331 }
12332
12333 /* Track the max number of EQEs processed in 1 intr */
12334 if (ecount > eq->EQ_max_eqe)
12335 eq->EQ_max_eqe = ecount;
12336
12337
12338 if (unlikely(ecount == 0)) {
12339 eq->EQ_no_entry++;
12340
12341 if (phba->intr_type == MSIX)
 12342 /* MSI-X vector is not shared; treat the empty interrupt as served */
12343 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12344 "9145 MSI-X interrupt with no EQE\n");
12345 else {
12346 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12347 "9146 ISR interrupt with no EQE\n");
 12348 /* Non MSI-X: the line may be shared, let another handler claim it */
12349 return IRQ_NONE;
12350 }
12351 }
12352 /* Always clear and re-arm the fast-path EQ */
12353 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12354 return IRQ_HANDLED;
12355}
12356
12135/** 12357/**
12136 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 12358 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
12137 * @irq: Interrupt number. 12359 * @irq: Interrupt number.
@@ -12287,6 +12509,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
12287 hba_handled |= true; 12509 hba_handled |= true;
12288 } 12510 }
12289 12511
12512 if (phba->cfg_fof) {
12513 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12514 &phba->sli4_hba.fcp_eq_hdl[0]);
12515 if (hba_irq_rc == IRQ_HANDLED)
12516 hba_handled |= true;
12517 }
12518
12290 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 12519 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12291} /* lpfc_sli4_intr_handler */ 12520} /* lpfc_sli4_intr_handler */
12292 12521
@@ -16544,7 +16773,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16544{ 16773{
16545 LIST_HEAD(completions); 16774 LIST_HEAD(completions);
16546 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 16775 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
16547 struct lpfc_iocbq *piocbq = 0; 16776 struct lpfc_iocbq *piocbq = NULL;
16548 unsigned long iflags = 0; 16777 unsigned long iflags = 0;
16549 char *fail_msg = NULL; 16778 char *fail_msg = NULL;
16550 struct lpfc_sglq *sglq; 16779 struct lpfc_sglq *sglq;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f2478706e..6f04080f4ea8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
78#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 78#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
79#define LPFC_FIP_ELS_ID_SHIFT 14 79#define LPFC_FIP_ELS_ID_SHIFT 14
80 80
81#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
82
81 uint32_t drvrTimeout; /* driver timeout in seconds */ 83 uint32_t drvrTimeout; /* driver timeout in seconds */
82 uint32_t fcp_wqidx; /* index to FCP work queue */ 84 uint32_t fcp_wqidx; /* index to FCP work queue */
83 struct lpfc_vport *vport;/* virtual port pointer */ 85 struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd1a89d..9b8cda866176 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
39#define LPFC_FCP_IO_CHAN_MIN 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FCP_IO_CHAN_MAX 16 40#define LPFC_FCP_IO_CHAN_MAX 16
41 41
42/* Number of channels used for Flash Optimized Fabric (FOF) operations */
43
44#define LPFC_FOF_IO_CHAN_NUM 1
45
42/* 46/*
43 * Provide the default FCF Record attributes used by the driver 47 * Provide the default FCF Record attributes used by the driver
44 * when nonFIP mode is configured and there is no other default 48 * when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
399 uint32_t if_page_sz; 403 uint32_t if_page_sz;
400 uint32_t rq_db_window; 404 uint32_t rq_db_window;
401 uint32_t loopbk_scope; 405 uint32_t loopbk_scope;
406 uint32_t oas_supported;
402 uint32_t eq_pages_max; 407 uint32_t eq_pages_max;
403 uint32_t eqe_size; 408 uint32_t eqe_size;
404 uint32_t cq_pages_max; 409 uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
439 uint8_t lnk_no; 444 uint8_t lnk_no;
440}; 445};
441 446
447#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
448 LPFC_FOF_IO_CHAN_NUM)
442#define LPFC_SLI4_HANDLER_NAME_SZ 16 449#define LPFC_SLI4_HANDLER_NAME_SZ 16
443 450
444/* Used for IRQ vector to CPU mapping */ 451/* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
507 struct lpfc_register sli_intf; 514 struct lpfc_register sli_intf;
508 struct lpfc_pc_sli4_params pc_sli4_params; 515 struct lpfc_pc_sli4_params pc_sli4_params;
509 struct msix_entry *msix_entries; 516 struct msix_entry *msix_entries;
510 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; 517 uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
511 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 518 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
512 519
513 /* Pointers to the constructed SLI4 queues */ 520 /* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
527 uint32_t ulp0_mode; /* ULP0 protocol mode */ 534 uint32_t ulp0_mode; /* ULP0 protocol mode */
528 uint32_t ulp1_mode; /* ULP1 protocol mode */ 535 uint32_t ulp1_mode; /* ULP1 protocol mode */
529 536
537 struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
538
539 /* Optimized Access Storage specific queues/structures */
540
541 struct lpfc_queue *oas_cq; /* OAS completion queue */
542 struct lpfc_queue *oas_wq; /* OAS Work queue */
543 struct lpfc_sli_ring *oas_ring;
544 uint64_t oas_next_lun;
545 uint8_t oas_next_tgt_wwpn[8];
546 uint8_t oas_next_vpt_wwpn[8];
547
530 /* Setup information for various queue parameters */ 548 /* Setup information for various queue parameters */
531 int eq_esize; 549 int eq_esize;
532 int eq_ecount; 550 int eq_ecount;
@@ -589,6 +607,7 @@ struct lpfc_sli4_hba {
589 struct lpfc_vector_map_info *cpu_map; 607 struct lpfc_vector_map_info *cpu_map;
590 uint16_t num_online_cpu; 608 uint16_t num_online_cpu;
591 uint16_t num_present_cpu; 609 uint16_t num_present_cpu;
610 uint16_t curr_disp_cpu;
592}; 611};
593 612
594enum lpfc_sge_type { 613enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4e143b..e32cbec70324 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.43" 21#define LPFC_DRIVER_VERSION "8.3.45"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 816db12ef5d5..b7770516f4c2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -531,13 +531,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
531 int target = 0; 531 int target = 0;
532 int ldrv_num = 0; /* logical drive number */ 532 int ldrv_num = 0; /* logical drive number */
533 533
534
535 /*
536 * filter the internal and ioctl commands
537 */
538 if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
539 return (scb_t *)cmd->host_scribble;
540
541 /* 534 /*
542 * We know what channels our logical drives are on - mega_find_card() 535 * We know what channels our logical drives are on - mega_find_card()
543 */ 536 */
@@ -1439,19 +1432,22 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1439 1432
1440 cmdid = completed[i]; 1433 cmdid = completed[i];
1441 1434
1442 if( cmdid == CMDID_INT_CMDS ) { /* internal command */ 1435 /*
1436 * Only free SCBs for the commands coming down from the
 1437 * mid-layer, not for those issued internally.
 1438 *
 1439 * For an internal command, restore the status returned by the
 1440 * firmware so that the user can interpret it.
1441 */
1442 if (cmdid == CMDID_INT_CMDS) {
1443 scb = &adapter->int_scb; 1443 scb = &adapter->int_scb;
1444 cmd = scb->cmd;
1445 mbox = (mbox_t *)scb->raw_mbox;
1446 1444
1447 /* 1445 list_del_init(&scb->list);
1448 * Internal command interface do not fire the extended 1446 scb->state = SCB_FREE;
1449 * passthru or 64-bit passthru
1450 */
1451 pthru = scb->pthru;
1452 1447
1453 } 1448 adapter->int_status = status;
1454 else { 1449 complete(&adapter->int_waitq);
1450 } else {
1455 scb = &adapter->scb_list[cmdid]; 1451 scb = &adapter->scb_list[cmdid];
1456 1452
1457 /* 1453 /*
@@ -1640,25 +1636,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1640 cmd->result |= (DID_BAD_TARGET << 16)|status; 1636 cmd->result |= (DID_BAD_TARGET << 16)|status;
1641 } 1637 }
1642 1638
1643 /* 1639 mega_free_scb(adapter, scb);
1644 * Only free SCBs for the commands coming down from the
1645 * mid-layer, not for which were issued internally
1646 *
1647 * For internal command, restore the status returned by the
1648 * firmware so that user can interpret it.
1649 */
1650 if( cmdid == CMDID_INT_CMDS ) { /* internal command */
1651 cmd->result = status;
1652
1653 /*
1654 * Remove the internal command from the pending list
1655 */
1656 list_del_init(&scb->list);
1657 scb->state = SCB_FREE;
1658 }
1659 else {
1660 mega_free_scb(adapter, scb);
1661 }
1662 1640
1663 /* Add Scsi_Command to end of completed queue */ 1641 /* Add Scsi_Command to end of completed queue */
1664 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); 1642 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
@@ -4133,23 +4111,15 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
4133 * The last argument is the address of the passthru structure if the command 4111 * The last argument is the address of the passthru structure if the command
4134 * to be fired is a passthru command 4112 * to be fired is a passthru command
4135 * 4113 *
4136 * lockscope specifies whether the caller has already acquired the lock. Of
4137 * course, the caller must know which lock we are talking about.
4138 *
4139 * Note: parameter 'pthru' is null for non-passthru commands. 4114 * Note: parameter 'pthru' is null for non-passthru commands.
4140 */ 4115 */
4141static int 4116static int
4142mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) 4117mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4143{ 4118{
4144 Scsi_Cmnd *scmd; 4119 unsigned long flags;
4145 struct scsi_device *sdev;
4146 scb_t *scb; 4120 scb_t *scb;
4147 int rval; 4121 int rval;
4148 4122
4149 scmd = scsi_allocate_command(GFP_KERNEL);
4150 if (!scmd)
4151 return -ENOMEM;
4152
4153 /* 4123 /*
4154 * The internal commands share one command id and hence are 4124 * The internal commands share one command id and hence are
4155 * serialized. This is so because we want to reserve maximum number of 4125 * serialized. This is so because we want to reserve maximum number of
@@ -4160,73 +4130,45 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4160 scb = &adapter->int_scb; 4130 scb = &adapter->int_scb;
4161 memset(scb, 0, sizeof(scb_t)); 4131 memset(scb, 0, sizeof(scb_t));
4162 4132
4163 sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); 4133 scb->idx = CMDID_INT_CMDS;
4164 scmd->device = sdev; 4134 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4165
4166 memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
4167 scmd->cmnd = adapter->int_cdb;
4168 scmd->device->host = adapter->host;
4169 scmd->host_scribble = (void *)scb;
4170 scmd->cmnd[0] = MEGA_INTERNAL_CMD;
4171
4172 scb->state |= SCB_ACTIVE;
4173 scb->cmd = scmd;
4174 4135
4175 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t)); 4136 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4176 4137
4177 /* 4138 /*
4178 * Is it a passthru command 4139 * Is it a passthru command
4179 */ 4140 */
4180 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { 4141 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4181
4182 scb->pthru = pthru; 4142 scb->pthru = pthru;
4183 }
4184
4185 scb->idx = CMDID_INT_CMDS;
4186 4143
4187 megaraid_queue_lck(scmd, mega_internal_done); 4144 spin_lock_irqsave(&adapter->lock, flags);
4145 list_add_tail(&scb->list, &adapter->pending_list);
4146 /*
 4147 * Check if the HBA is in a quiescent state, e.g., during a
 4148 * delete logical drive operation. If it is, don't run
4149 * the pending_list.
4150 */
4151 if (atomic_read(&adapter->quiescent) == 0)
4152 mega_runpendq(adapter);
4153 spin_unlock_irqrestore(&adapter->lock, flags);
4188 4154
4189 wait_for_completion(&adapter->int_waitq); 4155 wait_for_completion(&adapter->int_waitq);
4190 4156
4191 rval = scmd->result; 4157 mc->status = rval = adapter->int_status;
4192 mc->status = scmd->result;
4193 kfree(sdev);
4194 4158
4195 /* 4159 /*
4196 * Print a debug message for all failed commands. Applications can use 4160 * Print a debug message for all failed commands. Applications can use
4197 * this information. 4161 * this information.
4198 */ 4162 */
4199 if( scmd->result && trace_level ) { 4163 if (rval && trace_level) {
4200 printk("megaraid: cmd [%x, %x, %x] status:[%x]\n", 4164 printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
4201 mc->cmd, mc->opcode, mc->subopcode, scmd->result); 4165 mc->cmd, mc->opcode, mc->subopcode, rval);
4202 } 4166 }
4203 4167
4204 mutex_unlock(&adapter->int_mtx); 4168 mutex_unlock(&adapter->int_mtx);
4205
4206 scsi_free_command(GFP_KERNEL, scmd);
4207
4208 return rval; 4169 return rval;
4209} 4170}
4210 4171
4211
4212/**
4213 * mega_internal_done()
4214 * @scmd - internal scsi command
4215 *
4216 * Callback routine for internal commands.
4217 */
4218static void
4219mega_internal_done(Scsi_Cmnd *scmd)
4220{
4221 adapter_t *adapter;
4222
4223 adapter = (adapter_t *)scmd->device->host->hostdata;
4224
4225 complete(&adapter->int_waitq);
4226
4227}
4228
4229
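The megaraid rework replaces the fake Scsi_Cmnd and the mega_internal_done() callback removed above with a plain completion. The resulting handshake, reduced to its two halves (fw_status is a hypothetical name for the status mega_cmd_done() receives from the firmware):

	/* issuer, serialized under int_mtx */
	scb->idx = CMDID_INT_CMDS;
	spin_lock_irqsave(&adapter->lock, flags);
	list_add_tail(&scb->list, &adapter->pending_list);
	if (atomic_read(&adapter->quiescent) == 0)
		mega_runpendq(adapter);
	spin_unlock_irqrestore(&adapter->lock, flags);
	wait_for_completion(&adapter->int_waitq);
	status = adapter->int_status;

	/* completion side, in mega_cmd_done() */
	adapter->int_status = fw_status;
	complete(&adapter->int_waitq);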
4230static struct scsi_host_template megaraid_template = { 4172static struct scsi_host_template megaraid_template = {
4231 .module = THIS_MODULE, 4173 .module = THIS_MODULE,
4232 .name = "MegaRAID", 4174 .name = "MegaRAID",
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4d0ce4e78dfd..508d65e5a518 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -853,10 +853,10 @@ typedef struct {
853 853
854 u8 sglen; /* f/w supported scatter-gather list length */ 854 u8 sglen; /* f/w supported scatter-gather list length */
855 855
856 unsigned char int_cdb[MAX_COMMAND_SIZE];
857 scb_t int_scb; 856 scb_t int_scb;
858 struct mutex int_mtx; /* To synchronize the internal 857 struct mutex int_mtx; /* To synchronize the internal
859 commands */ 858 commands */
859 int int_status; /* status of internal cmd */
860 struct completion int_waitq; /* wait queue for internal 860 struct completion int_waitq; /* wait queue for internal
861 cmds */ 861 cmds */
862 862
@@ -1004,7 +1004,6 @@ static int mega_del_logdrv(adapter_t *, int);
1004static int mega_do_del_logdrv(adapter_t *, int); 1004static int mega_do_del_logdrv(adapter_t *, int);
1005static void mega_get_max_sgl(adapter_t *); 1005static void mega_get_max_sgl(adapter_t *);
1006static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *); 1006static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
1007static void mega_internal_done(Scsi_Cmnd *);
1008static int mega_support_cluster(adapter_t *); 1007static int mega_support_cluster(adapter_t *);
1009#endif 1008#endif
1010 1009
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f37916..a70692779a16 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
486 486
487 pthru32->dataxferaddr = kioc->buf_paddr; 487 pthru32->dataxferaddr = kioc->buf_paddr;
488 if (kioc->data_dir & UIOC_WR) { 488 if (kioc->data_dir & UIOC_WR) {
489 if (pthru32->dataxferlen > kioc->xferlen)
490 return -EINVAL;
489 if (copy_from_user(kioc->buf_vaddr, kioc->user_data, 491 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
490 pthru32->dataxferlen)) { 492 pthru32->dataxferlen)) {
491 return (-EFAULT); 493 return (-EFAULT);
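
The two added lines close a classic ioctl hole: pthru32->dataxferlen arrives from userspace, so it must be bounded by the size of the kernel bounce buffer (kioc->xferlen) before it is used as a copy length. The shape of the check, as a sketch with hypothetical names:

    /* Never trust a user-supplied length before copy_from_user(). */
    if (user_len > kbuf_len)
            return -EINVAL;             /* oversized request */
    if (copy_from_user(kbuf, ubuf, user_len))
            return -EFAULT;             /* faulted mid-copy */
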
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea386ac..32166c2c7854 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "06.700.06.00-rc1" 36#define MEGASAS_VERSION "06.803.01.00-rc1"
37#define MEGASAS_RELDATE "Aug. 31, 2013" 37#define MEGASAS_RELDATE "Mar. 10, 2014"
38#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013" 38#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -48,6 +48,7 @@
48#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 48#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
49#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 49#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
50#define PCI_DEVICE_ID_LSI_FUSION 0x005b 50#define PCI_DEVICE_ID_LSI_FUSION 0x005b
51#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
51#define PCI_DEVICE_ID_LSI_INVADER 0x005d 52#define PCI_DEVICE_ID_LSI_INVADER 0x005d
52#define PCI_DEVICE_ID_LSI_FURY 0x005f 53#define PCI_DEVICE_ID_LSI_FURY 0x005f
53 54
@@ -559,7 +560,8 @@ struct megasas_ctrl_info {
559 u8 PCIE:1; 560 u8 PCIE:1;
560 u8 iSCSI:1; 561 u8 iSCSI:1;
561 u8 SAS_3G:1; 562 u8 SAS_3G:1;
562 u8 reserved_0:4; 563 u8 SRIOV:1;
564 u8 reserved_0:3;
563 u8 reserved_1[6]; 565 u8 reserved_1[6];
564 u8 port_count; 566 u8 port_count;
565 u64 port_addr[8]; 567 u64 port_addr[8];
@@ -839,7 +841,12 @@ struct megasas_ctrl_info {
839 841
840 struct { /*7A4h */ 842 struct { /*7A4h */
841#if defined(__BIG_ENDIAN_BITFIELD) 843#if defined(__BIG_ENDIAN_BITFIELD)
842 u32 reserved:11; 844 u32 reserved:5;
845 u32 activePassive:2;
846 u32 supportConfigAutoBalance:1;
847 u32 mpio:1;
848 u32 supportDataLDonSSCArray:1;
849 u32 supportPointInTimeProgress:1;
843 u32 supportUnevenSpans:1; 850 u32 supportUnevenSpans:1;
844 u32 dedicatedHotSparesLimited:1; 851 u32 dedicatedHotSparesLimited:1;
845 u32 headlessMode:1; 852 u32 headlessMode:1;
@@ -886,7 +893,12 @@ struct megasas_ctrl_info {
886 893
887 894
888 u32 supportUnevenSpans:1; 895 u32 supportUnevenSpans:1;
889 u32 reserved:11; 896 u32 supportPointInTimeProgress:1;
897 u32 supportDataLDonSSCArray:1;
898 u32 mpio:1;
899 u32 supportConfigAutoBalance:1;
900 u32 activePassive:2;
901 u32 reserved:5;
890#endif 902#endif
891 } adapterOperations2; 903 } adapterOperations2;
892 904
@@ -914,8 +926,14 @@ struct megasas_ctrl_info {
914 } cluster; 926 } cluster;
915 927
916 char clusterId[16]; /*7D4h */ 928 char clusterId[16]; /*7D4h */
929 struct {
930 u8 maxVFsSupported; /*0x7E4*/
931 u8 numVFsEnabled; /*0x7E5*/
932 u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
933 u8 reserved; /*0x7E7*/
934 } iov;
917 935
918 u8 pad[0x800-0x7E4]; /*7E4 */ 936 u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
919} __packed; 937} __packed;
920 938
921/* 939/*
@@ -986,7 +1004,9 @@ struct megasas_ctrl_info {
986 1004
987#define MFI_OB_INTR_STATUS_MASK 0x00000002 1005#define MFI_OB_INTR_STATUS_MASK 0x00000002
988#define MFI_POLL_TIMEOUT_SECS 60 1006#define MFI_POLL_TIMEOUT_SECS 60
989 1007#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
1008#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
1009#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
990#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 1010#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
991#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 1011#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
992#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) 1012#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
@@ -1347,9 +1367,15 @@ struct megasas_cmd;
1347union megasas_evt_class_locale { 1367union megasas_evt_class_locale {
1348 1368
1349 struct { 1369 struct {
1370#ifndef __BIG_ENDIAN_BITFIELD
1350 u16 locale; 1371 u16 locale;
1351 u8 reserved; 1372 u8 reserved;
1352 s8 class; 1373 s8 class;
1374#else
1375 s8 class;
1376 u8 reserved;
1377 u16 locale;
1378#endif
1353 } __attribute__ ((packed)) members; 1379 } __attribute__ ((packed)) members;
1354 1380
1355 u32 word; 1381 u32 word;
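
The added #ifdef matters because the union overlays a u32 (word) on the byte-wise members view: on a big-endian machine the sub-fields must be declared in reverse order for both views to describe the same bytes. A small userspace probe, assuming GCC-style struct layout, shows that the same word value then results on either endianness:

    #include <stdio.h>
    #include <stdint.h>

    union evt_class_locale {
            struct {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    uint16_t locale;
                    uint8_t  reserved;
                    int8_t   class;
    #else
                    int8_t   class;
                    uint8_t  reserved;
                    uint16_t locale;
    #endif
            } __attribute__((packed)) members;
            uint32_t word;
    };

    int main(void)
    {
            union evt_class_locale u = { .word = 0 };

            u.members.locale = 0xFFFF;          /* all locales */
            u.members.class  = -1;              /* lowest class */
            printf("word = 0x%08x\n", u.word);  /* 0xff00ffff either way */
            return 0;
    }
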
@@ -1523,6 +1549,12 @@ struct megasas_instance {
1523 dma_addr_t producer_h; 1549 dma_addr_t producer_h;
1524 u32 *consumer; 1550 u32 *consumer;
1525 dma_addr_t consumer_h; 1551 dma_addr_t consumer_h;
1552 struct MR_LD_VF_AFFILIATION *vf_affiliation;
1553 dma_addr_t vf_affiliation_h;
1554 struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
1555 dma_addr_t vf_affiliation_111_h;
1556 struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
1557 dma_addr_t hb_host_mem_h;
1526 1558
1527 u32 *reply_queue; 1559 u32 *reply_queue;
1528 dma_addr_t reply_queue_h; 1560 dma_addr_t reply_queue_h;
@@ -1598,10 +1630,73 @@ struct megasas_instance {
1598 unsigned long bar; 1630 unsigned long bar;
1599 long reset_flags; 1631 long reset_flags;
1600 struct mutex reset_mutex; 1632 struct mutex reset_mutex;
1633 struct timer_list sriov_heartbeat_timer;
1634 char skip_heartbeat_timer_del;
1635 u8 requestorId;
1636 u64 initiator_sas_address;
1637 u64 ld_sas_address[64];
1638 char PlasmaFW111;
1639 char mpio;
1601 int throttlequeuedepth; 1640 int throttlequeuedepth;
1602 u8 mask_interrupts; 1641 u8 mask_interrupts;
1603 u8 is_imr; 1642 u8 is_imr;
1604}; 1643};
1644struct MR_LD_VF_MAP {
1645 u32 size;
1646 union MR_LD_REF ref;
1647 u8 ldVfCount;
1648 u8 reserved[6];
1649 u8 policy[1];
1650};
1651
1652struct MR_LD_VF_AFFILIATION {
1653 u32 size;
1654 u8 ldCount;
1655 u8 vfCount;
1656 u8 thisVf;
1657 u8 reserved[9];
1658 struct MR_LD_VF_MAP map[1];
1659};
1660
1661/* Plasma 1.11 FW backward compatibility structures */
1662#define IOV_111_OFFSET 0x7CE
1663#define MAX_VIRTUAL_FUNCTIONS 8
1664
1665struct IOV_111 {
1666 u8 maxVFsSupported;
1667 u8 numVFsEnabled;
1668 u8 requestorId;
1669 u8 reserved[5];
1670};
1671
1672struct MR_LD_VF_MAP_111 {
1673 u8 targetId;
1674 u8 reserved[3];
1675 u8 policy[MAX_VIRTUAL_FUNCTIONS];
1676};
1677
1678struct MR_LD_VF_AFFILIATION_111 {
1679 u8 vdCount;
1680 u8 vfCount;
1681 u8 thisVf;
1682 u8 reserved[5];
1683 struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
1684};
1685
1686struct MR_CTRL_HB_HOST_MEM {
1687 struct {
1688 u32 fwCounter; /* Firmware heart beat counter */
1689 struct {
1690 u32 debugmode:1; /* 1=Firmware is in debug mode.
1691 Heart beat will not be updated. */
1692 u32 reserved:31;
1693 } debug;
1694 u32 reserved_fw[6];
1695 u32 driverCounter; /* Driver heart beat counter. 0x20 */
1696 u32 reserved_driver[7];
1697 } HB;
1698 u8 pad[0x400-0x40];
1699};
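
The layout comments here are load-bearing ABI: driverCounter must sit at offset 0x20 of the HB block, and the trailing pad rounds the shared structure to 0x400 bytes (1 KiB) so driver and firmware agree on its footprint. A compile-time check one could add, sketched with the kernel's BUILD_BUG_ON() (from <linux/bug.h>, using <linux/stddef.h> offsetof):

    /* Sketch: pin the shared-memory ABI at build time. */
    BUILD_BUG_ON(offsetof(struct MR_CTRL_HB_HOST_MEM, HB.driverCounter) != 0x20);
    BUILD_BUG_ON(sizeof(struct MR_CTRL_HB_HOST_MEM) != 0x400);
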
1605 1700
1606enum { 1701enum {
1607 MEGASAS_HBA_OPERATIONAL = 0, 1702 MEGASAS_HBA_OPERATIONAL = 0,
@@ -1609,6 +1704,7 @@ enum {
1609 MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, 1704 MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
1610 MEGASAS_ADPRESET_SM_OPERATIONAL = 3, 1705 MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
1611 MEGASAS_HW_CRITICAL_ERROR = 4, 1706 MEGASAS_HW_CRITICAL_ERROR = 4,
1707 MEGASAS_ADPRESET_SM_POLLING = 5,
1612 MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, 1708 MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
1613}; 1709};
1614 1710
@@ -1728,7 +1824,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1728 struct IO_REQUEST_INFO *io_info, 1824 struct IO_REQUEST_INFO *io_info,
1729 struct RAID_CONTEXT *pRAID_Context, 1825 struct RAID_CONTEXT *pRAID_Context,
1730 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); 1826 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
1731u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); 1827u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
1732struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 1828struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
1733u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); 1829u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
1734u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); 1830u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3b7ad10497fe..d84d02c2aad9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : 06.700.06.00-rc1 21 * Version : 06.803.01.00-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -75,6 +75,10 @@ static unsigned int msix_vectors;
75module_param(msix_vectors, int, S_IRUGO); 75module_param(msix_vectors, int, S_IRUGO);
76MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); 76MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
77 77
78static int allow_vf_ioctls;
79module_param(allow_vf_ioctls, int, S_IRUGO);
80MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
81
78static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; 82static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
79module_param(throttlequeuedepth, int, S_IRUGO); 83module_param(throttlequeuedepth, int, S_IRUGO);
80MODULE_PARM_DESC(throttlequeuedepth, 84MODULE_PARM_DESC(throttlequeuedepth,
@@ -122,6 +126,8 @@ static struct pci_device_id megasas_pci_table[] = {
122 /* xscale IOP */ 126 /* xscale IOP */
123 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 127 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
124 /* Fusion */ 128 /* Fusion */
129 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
130 /* Plasma */
125 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, 131 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
126 /* Invader */ 132 /* Invader */
127 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, 133 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
@@ -132,7 +138,7 @@ static struct pci_device_id megasas_pci_table[] = {
132MODULE_DEVICE_TABLE(pci, megasas_pci_table); 138MODULE_DEVICE_TABLE(pci, megasas_pci_table);
133 139
134static int megasas_mgmt_majorno; 140static int megasas_mgmt_majorno;
135static struct megasas_mgmt_info megasas_mgmt_info; 141struct megasas_mgmt_info megasas_mgmt_info;
136static struct fasync_struct *megasas_async_queue; 142static struct fasync_struct *megasas_async_queue;
137static DEFINE_MUTEX(megasas_async_queue_mutex); 143static DEFINE_MUTEX(megasas_async_queue_mutex);
138 144
@@ -171,10 +177,15 @@ megasas_get_map_info(struct megasas_instance *instance);
171int 177int
172megasas_sync_map_info(struct megasas_instance *instance); 178megasas_sync_map_info(struct megasas_instance *instance);
173int 179int
174wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); 180wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
181 int seconds);
175void megasas_reset_reply_desc(struct megasas_instance *instance); 182void megasas_reset_reply_desc(struct megasas_instance *instance);
176int megasas_reset_fusion(struct Scsi_Host *shost); 183int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
177void megasas_fusion_ocr_wq(struct work_struct *work); 184void megasas_fusion_ocr_wq(struct work_struct *work);
185static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
186 int initial);
187int megasas_check_mpio_paths(struct megasas_instance *instance,
188 struct scsi_cmnd *scmd);
178 189
179void 190void
180megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 191megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -224,6 +235,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
224 cmd->scmd = NULL; 235 cmd->scmd = NULL;
225 cmd->frame_count = 0; 236 cmd->frame_count = 0;
226 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && 237 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
238 (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
227 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && 239 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
228 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && 240 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
229 (reset_devices)) 241 (reset_devices))
@@ -877,6 +889,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
877int 889int
878megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 890megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
879{ 891{
892 int seconds;
880 893
881 struct megasas_header *frame_hdr = &cmd->frame->hdr; 894 struct megasas_header *frame_hdr = &cmd->frame->hdr;
882 895
@@ -891,13 +904,18 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
891 /* 904 /*
892 * Wait for cmd_status to change 905 * Wait for cmd_status to change
893 */ 906 */
894 return wait_and_poll(instance, cmd); 907 if (instance->requestorId)
908 seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
909 else
910 seconds = MFI_POLL_TIMEOUT_SECS;
911 return wait_and_poll(instance, cmd, seconds);
895} 912}
896 913
897/** 914/**
898 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds 915 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
899 * @instance: Adapter soft state 916 * @instance: Adapter soft state
900 * @cmd: Command to be issued 917 * @cmd: Command to be issued
918 * @timeout: Timeout in seconds
901 * 919 *
902 * This function waits on an event for the command to be returned from ISR. 920 * This function waits on an event for the command to be returned from ISR.
 903 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 921 * Max wait time is @timeout secs when non-zero, else the wait is unbounded
@@ -905,13 +923,20 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
905 */ 923 */
906static int 924static int
907megasas_issue_blocked_cmd(struct megasas_instance *instance, 925megasas_issue_blocked_cmd(struct megasas_instance *instance,
908 struct megasas_cmd *cmd) 926 struct megasas_cmd *cmd, int timeout)
909{ 927{
928 int ret = 0;
910 cmd->cmd_status = ENODATA; 929 cmd->cmd_status = ENODATA;
911 930
912 instance->instancet->issue_dcmd(instance, cmd); 931 instance->instancet->issue_dcmd(instance, cmd);
913 932 if (timeout) {
914 wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); 933 ret = wait_event_timeout(instance->int_cmd_wait_q,
934 cmd->cmd_status != ENODATA, timeout * HZ);
935 if (!ret)
936 return 1;
937 } else
938 wait_event(instance->int_cmd_wait_q,
939 cmd->cmd_status != ENODATA);
915 940
916 return 0; 941 return 0;
917} 942}
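
The new timeout argument preserves the old semantics when it is 0 (wait indefinitely) and otherwise bounds the sleep; wait_event_timeout() returns 0 only if the condition is still false when the timeout expires, which is exactly what the "if (!ret) return 1;" test keys on. The wrapper shape, as a sketch with hypothetical names:

    /* Bounded vs. unbounded wait on a command-status condition. */
    static int demo_blocked_wait(struct demo_ctx *ctx, int timeout_secs)
    {
            if (timeout_secs) {
                    if (!wait_event_timeout(ctx->waitq,
                                            ctx->status != ENODATA,
                                            timeout_secs * HZ))
                            return 1;   /* timed out; caller treats as failure */
            } else {
                    wait_event(ctx->waitq, ctx->status != ENODATA);
            }
            return 0;                   /* condition became true */
    }
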
@@ -920,18 +945,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
920 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd 945 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
921 * @instance: Adapter soft state 946 * @instance: Adapter soft state
922 * @cmd_to_abort: Previously issued cmd to be aborted 947 * @cmd_to_abort: Previously issued cmd to be aborted
948 * @timeout: Timeout in seconds
923 * 949 *
 924 * MFI firmware can abort previously issued AEN command (automatic event 950 * MFI firmware can abort previously issued AEN command (automatic event
925 * notification). The megasas_issue_blocked_abort_cmd() issues such abort 951 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
926 * cmd and waits for return status. 952 * cmd and waits for return status.
 927 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 953 * Max wait time is @timeout secs when non-zero, else the wait is unbounded
928 */ 954 */
929static int 955static int
930megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, 956megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
931 struct megasas_cmd *cmd_to_abort) 957 struct megasas_cmd *cmd_to_abort, int timeout)
932{ 958{
933 struct megasas_cmd *cmd; 959 struct megasas_cmd *cmd;
934 struct megasas_abort_frame *abort_fr; 960 struct megasas_abort_frame *abort_fr;
961 int ret = 0;
935 962
936 cmd = megasas_get_cmd(instance); 963 cmd = megasas_get_cmd(instance);
937 964
@@ -957,10 +984,18 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
957 984
958 instance->instancet->issue_dcmd(instance, cmd); 985 instance->instancet->issue_dcmd(instance, cmd);
959 986
960 /* 987 if (timeout) {
961 * Wait for this cmd to complete 988 ret = wait_event_timeout(instance->abort_cmd_wait_q,
962 */ 989 cmd->cmd_status != ENODATA, timeout * HZ);
963 wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF); 990 if (!ret) {
991 dev_err(&instance->pdev->dev, "Command timedout"
992 "from %s\n", __func__);
993 return 1;
994 }
995 } else
996 wait_event(instance->abort_cmd_wait_q,
997 cmd->cmd_status != ENODATA);
998
964 cmd->sync_cmd = 0; 999 cmd->sync_cmd = 0;
965 1000
966 megasas_return_cmd(instance, cmd); 1001 megasas_return_cmd(instance, cmd);
@@ -1514,9 +1549,23 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1514 1549
1515 spin_lock_irqsave(&instance->hba_lock, flags); 1550 spin_lock_irqsave(&instance->hba_lock, flags);
1516 1551
1552 /* Check for an mpio path and adjust behavior */
1553 if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
1554 if (megasas_check_mpio_paths(instance, scmd) ==
1555 (DID_RESET << 16)) {
1556 spin_unlock_irqrestore(&instance->hba_lock, flags);
1557 return SCSI_MLQUEUE_HOST_BUSY;
1558 } else {
1559 spin_unlock_irqrestore(&instance->hba_lock, flags);
1560 scmd->result = DID_NO_CONNECT << 16;
1561 done(scmd);
1562 return 0;
1563 }
1564 }
1565
1517 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 1566 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1518 spin_unlock_irqrestore(&instance->hba_lock, flags); 1567 spin_unlock_irqrestore(&instance->hba_lock, flags);
1519 scmd->result = DID_ERROR << 16; 1568 scmd->result = DID_NO_CONNECT << 16;
1520 done(scmd); 1569 done(scmd);
1521 return 0; 1570 return 0;
1522 } 1571 }
@@ -1641,9 +1690,14 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
1641 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 1690 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1642 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 1691 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1643 (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 1692 (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
1693 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
1644 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1694 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
1645 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1695 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
1646 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 1696 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1697 /* Flush */
1698 readl(&instance->reg_set->doorbell);
1699 if (instance->mpio && instance->requestorId)
1700 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
1647 } else { 1701 } else {
1648 writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); 1702 writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
1649 } 1703 }
@@ -1730,6 +1784,25 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1730 megasas_check_and_restore_queue_depth(instance); 1784 megasas_check_and_restore_queue_depth(instance);
1731} 1785}
1732 1786
1787/**
1788 * megasas_start_timer - Initializes a timer object
1789 * @instance: Adapter soft state
1790 * @timer: timer object to be initialized
1791 * @fn: timer function
 1792 * @interval: time interval between timer function calls
1793 *
1794 */
1795void megasas_start_timer(struct megasas_instance *instance,
1796 struct timer_list *timer,
1797 void *fn, unsigned long interval)
1798{
1799 init_timer(timer);
1800 timer->expires = jiffies + interval;
1801 timer->data = (unsigned long)instance;
1802 timer->function = fn;
1803 add_timer(timer);
1804}
1805
1733static void 1806static void
1734megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 1807megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
1735 1808
@@ -1752,6 +1825,295 @@ void megasas_do_ocr(struct megasas_instance *instance)
1752 process_fw_state_change_wq(&instance->work_init); 1825 process_fw_state_change_wq(&instance->work_init);
1753} 1826}
1754 1827
1828/* This function will get the current SR-IOV LD/VF affiliation */
1829static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
1830 int initial)
1831{
1832 struct megasas_cmd *cmd;
1833 struct megasas_dcmd_frame *dcmd;
1834 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
1835 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
1836 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
1837 dma_addr_t new_affiliation_h;
1838 dma_addr_t new_affiliation_111_h;
1839 int ld, retval = 0;
1840 u8 thisVf;
1841
1842 cmd = megasas_get_cmd(instance);
1843
1844 if (!cmd) {
1845 printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
1846 "affiliation: Failed to get cmd for scsi%d.\n",
1847 instance->host->host_no);
1848 return -ENOMEM;
1849 }
1850
1851 dcmd = &cmd->frame->dcmd;
1852
1853 if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
1854 printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
1855 "affiliation for scsi%d.\n", instance->host->host_no);
1856 megasas_return_cmd(instance, cmd);
1857 return -ENOMEM;
1858 }
1859
1860 if (initial)
1861 if (instance->PlasmaFW111)
1862 memset(instance->vf_affiliation_111, 0,
1863 sizeof(struct MR_LD_VF_AFFILIATION_111));
1864 else
1865 memset(instance->vf_affiliation, 0,
1866 (MAX_LOGICAL_DRIVES + 1) *
1867 sizeof(struct MR_LD_VF_AFFILIATION));
1868 else {
1869 if (instance->PlasmaFW111)
1870 new_affiliation_111 =
1871 pci_alloc_consistent(instance->pdev,
1872 sizeof(struct MR_LD_VF_AFFILIATION_111),
1873 &new_affiliation_111_h);
1874 else
1875 new_affiliation =
1876 pci_alloc_consistent(instance->pdev,
1877 (MAX_LOGICAL_DRIVES + 1) *
1878 sizeof(struct MR_LD_VF_AFFILIATION),
1879 &new_affiliation_h);
1880 if (!new_affiliation && !new_affiliation_111) {
1881 printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
1882 "memory for new affiliation for scsi%d.\n",
1883 instance->host->host_no);
1884 megasas_return_cmd(instance, cmd);
1885 return -ENOMEM;
1886 }
1887 if (instance->PlasmaFW111)
1888 memset(new_affiliation_111, 0,
1889 sizeof(struct MR_LD_VF_AFFILIATION_111));
1890 else
1891 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
1892 sizeof(struct MR_LD_VF_AFFILIATION));
1893 }
1894
1895 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1896
1897 dcmd->cmd = MFI_CMD_DCMD;
1898 dcmd->cmd_status = 0xFF;
1899 dcmd->sge_count = 1;
1900 dcmd->flags = MFI_FRAME_DIR_BOTH;
1901 dcmd->timeout = 0;
1902 dcmd->pad_0 = 0;
1903 if (instance->PlasmaFW111) {
1904 dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
1905 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
1906 } else {
1907 dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
1908 sizeof(struct MR_LD_VF_AFFILIATION);
1909 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
1910 }
1911
1912 if (initial) {
1913 if (instance->PlasmaFW111)
1914 dcmd->sgl.sge32[0].phys_addr =
1915 instance->vf_affiliation_111_h;
1916 else
1917 dcmd->sgl.sge32[0].phys_addr =
1918 instance->vf_affiliation_h;
1919 } else {
1920 if (instance->PlasmaFW111)
1921 dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
1922 else
1923 dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
1924 }
1925 if (instance->PlasmaFW111)
1926 dcmd->sgl.sge32[0].length =
1927 sizeof(struct MR_LD_VF_AFFILIATION_111);
1928 else
1929 dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
1930 sizeof(struct MR_LD_VF_AFFILIATION);
1931
1932 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
1933 "scsi%d\n", instance->host->host_no);
1934
1935 megasas_issue_blocked_cmd(instance, cmd, 0);
1936
1937 if (dcmd->cmd_status) {
1938 printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
1939 " failed with status 0x%x for scsi%d.\n",
1940 dcmd->cmd_status, instance->host->host_no);
1941 retval = 1; /* Do a scan if we couldn't get affiliation */
1942 goto out;
1943 }
1944
1945 if (!initial) {
1946 if (instance->PlasmaFW111) {
1947 if (!new_affiliation_111->vdCount) {
1948 printk(KERN_WARNING "megasas: SR-IOV: Got new "
1949 "LD/VF affiliation for passive path "
1950 "for scsi%d.\n",
1951 instance->host->host_no);
1952 retval = 1;
1953 goto out;
1954 }
1955 thisVf = new_affiliation_111->thisVf;
1956 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
1957 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
1958 printk(KERN_WARNING "megasas: SR-IOV: "
1959 "Got new LD/VF affiliation "
1960 "for scsi%d.\n",
1961 instance->host->host_no);
1962 memcpy(instance->vf_affiliation_111,
1963 new_affiliation_111,
1964 sizeof(struct MR_LD_VF_AFFILIATION_111));
1965 retval = 1;
1966 goto out;
1967 }
1968 } else {
1969 if (!new_affiliation->ldCount) {
1970 printk(KERN_WARNING "megasas: SR-IOV: Got new "
1971 "LD/VF affiliation for passive "
1972 "path for scsi%d.\n",
1973 instance->host->host_no);
1974 retval = 1;
1975 goto out;
1976 }
1977 newmap = new_affiliation->map;
1978 savedmap = instance->vf_affiliation->map;
1979 thisVf = new_affiliation->thisVf;
1980 for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
1981 if (savedmap->policy[thisVf] !=
1982 newmap->policy[thisVf]) {
1983 printk(KERN_WARNING "megasas: SR-IOV: "
1984 "Got new LD/VF affiliation "
1985 "for scsi%d.\n",
1986 instance->host->host_no);
1987 memcpy(instance->vf_affiliation,
1988 new_affiliation,
1989 new_affiliation->size);
1990 retval = 1;
1991 goto out;
1992 }
1993 savedmap = (struct MR_LD_VF_MAP *)
1994 ((unsigned char *)savedmap +
1995 savedmap->size);
1996 newmap = (struct MR_LD_VF_MAP *)
1997 ((unsigned char *)newmap +
1998 newmap->size);
1999 }
2000 }
2001 }
2002out:
2003 if (new_affiliation) {
2004 if (instance->PlasmaFW111)
2005 pci_free_consistent(instance->pdev,
2006 sizeof(struct MR_LD_VF_AFFILIATION_111),
2007 new_affiliation_111,
2008 new_affiliation_111_h);
2009 else
2010 pci_free_consistent(instance->pdev,
2011 (MAX_LOGICAL_DRIVES + 1) *
2012 sizeof(struct MR_LD_VF_AFFILIATION),
2013 new_affiliation, new_affiliation_h);
2014 }
2015 megasas_return_cmd(instance, cmd);
2016
2017 return retval;
2018}
2019
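Note the traversal in the comparison loop above: each MR_LD_VF_MAP record carries its own size field, so both cursors advance by map->size bytes rather than by sizeof(struct MR_LD_VF_MAP). The idiom, sketched with field names from the header change:

    /* Walking self-sized records laid out back to back in one buffer. */
    struct MR_LD_VF_MAP *map = affiliation->map;
    int ld;

    for (ld = 0; ld < affiliation->ldCount; ld++) {
            /* ... compare map->policy[thisVf] here ... */
            map = (struct MR_LD_VF_MAP *)((unsigned char *)map + map->size);
    }
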
2020/* This function will tell FW to start the SR-IOV heartbeat */
2021int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2022 int initial)
2023{
2024 struct megasas_cmd *cmd;
2025 struct megasas_dcmd_frame *dcmd;
2026 int retval = 0;
2027
2028 cmd = megasas_get_cmd(instance);
2029
2030 if (!cmd) {
2031 printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
2032 "Failed to get cmd for scsi%d.\n",
2033 instance->host->host_no);
2034 return -ENOMEM;
2035 }
2036
2037 dcmd = &cmd->frame->dcmd;
2038
2039 if (initial) {
2040 instance->hb_host_mem =
2041 pci_alloc_consistent(instance->pdev,
2042 sizeof(struct MR_CTRL_HB_HOST_MEM),
2043 &instance->hb_host_mem_h);
2044 if (!instance->hb_host_mem) {
2045 printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
2046 " memory for heartbeat host memory for "
2047 "scsi%d.\n", instance->host->host_no);
2048 retval = -ENOMEM;
2049 goto out;
2050 }
2051 memset(instance->hb_host_mem, 0,
2052 sizeof(struct MR_CTRL_HB_HOST_MEM));
2053 }
2054
2055 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2056
2057 dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
2058 dcmd->cmd = MFI_CMD_DCMD;
2059 dcmd->cmd_status = 0xFF;
2060 dcmd->sge_count = 1;
2061 dcmd->flags = MFI_FRAME_DIR_BOTH;
2062 dcmd->timeout = 0;
2063 dcmd->pad_0 = 0;
2064 dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
2065 dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
2066 dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
2067 dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
2068
2069 printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
2070 instance->host->host_no);
2071
2072 if (!megasas_issue_polled(instance, cmd)) {
2073 retval = 0;
2074 } else {
2075 printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2076 "_MEM_ALLOC DCMD timed out for scsi%d\n",
2077 instance->host->host_no);
2078 retval = 1;
2079 goto out;
2080 }
2081
2082
2083 if (dcmd->cmd_status) {
2084 printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2085 "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
2086 dcmd->cmd_status,
2087 instance->host->host_no);
2088 retval = 1;
2089 goto out;
2090 }
2091
2092out:
2093 megasas_return_cmd(instance, cmd);
2094
2095 return retval;
2096}
2097
2098/* Handler for SR-IOV heartbeat */
2099void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2100{
2101 struct megasas_instance *instance =
2102 (struct megasas_instance *)instance_addr;
2103
2104 if (instance->hb_host_mem->HB.fwCounter !=
2105 instance->hb_host_mem->HB.driverCounter) {
2106 instance->hb_host_mem->HB.driverCounter =
2107 instance->hb_host_mem->HB.fwCounter;
2108 mod_timer(&instance->sriov_heartbeat_timer,
2109 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2110 } else {
2111 printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
2112 "completed for scsi%d\n", instance->host->host_no);
2113 schedule_work(&instance->work_init);
2114 }
2115}
2116
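The handler above is a shared-memory watchdog: firmware bumps fwCounter; each time the timer fires the driver echoes it into driverCounter and re-arms; if the two counters are already equal, firmware made no progress during the interval and recovery (work_init) is scheduled. Stripped to its essentials, with hypothetical names and the pre-4.15 timer callback signature used throughout this patch:

    static void demo_heartbeat(unsigned long data)
    {
            struct demo_instance *inst = (struct demo_instance *)data;

            if (inst->hb->fw_counter != inst->hb->drv_counter) {
                    /* Firmware advanced: acknowledge and re-arm. */
                    inst->hb->drv_counter = inst->hb->fw_counter;
                    mod_timer(&inst->hb_timer,
                              jiffies + DEMO_HB_INTERVAL);
            } else {
                    schedule_work(&inst->recovery_work); /* FW stalled */
            }
    }
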
1755/** 2117/**
1756 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2118 * megasas_wait_for_outstanding - Wait for all outstanding cmds
1757 * @instance: Adapter soft state 2119 * @instance: Adapter soft state
@@ -2014,9 +2376,10 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2014 * First wait for all commands to complete 2376 * First wait for all commands to complete
2015 */ 2377 */
2016 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 2378 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
2379 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
2017 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 2380 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
2018 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 2381 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
2019 ret = megasas_reset_fusion(scmd->device->host); 2382 ret = megasas_reset_fusion(scmd->device->host, 1);
2020 else 2383 else
2021 ret = megasas_generic_reset(scmd); 2384 ret = megasas_generic_reset(scmd);
2022 2385
@@ -2731,6 +3094,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2731 (instance->pdev->device == 3094 (instance->pdev->device ==
2732 PCI_DEVICE_ID_LSI_FUSION) || 3095 PCI_DEVICE_ID_LSI_FUSION) ||
2733 (instance->pdev->device == 3096 (instance->pdev->device ==
3097 PCI_DEVICE_ID_LSI_PLASMA) ||
3098 (instance->pdev->device ==
2734 PCI_DEVICE_ID_LSI_INVADER) || 3099 PCI_DEVICE_ID_LSI_INVADER) ||
2735 (instance->pdev->device == 3100 (instance->pdev->device ==
2736 PCI_DEVICE_ID_LSI_FURY)) { 3101 PCI_DEVICE_ID_LSI_FURY)) {
@@ -2755,6 +3120,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2755 (instance->pdev->device == 3120 (instance->pdev->device ==
2756 PCI_DEVICE_ID_LSI_FUSION) || 3121 PCI_DEVICE_ID_LSI_FUSION) ||
2757 (instance->pdev->device == 3122 (instance->pdev->device ==
3123 PCI_DEVICE_ID_LSI_PLASMA) ||
3124 (instance->pdev->device ==
2758 PCI_DEVICE_ID_LSI_INVADER) || 3125 PCI_DEVICE_ID_LSI_INVADER) ||
2759 (instance->pdev->device == 3126 (instance->pdev->device ==
2760 PCI_DEVICE_ID_LSI_FURY)) { 3127 PCI_DEVICE_ID_LSI_FURY)) {
@@ -2780,6 +3147,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2780 (instance->pdev->device 3147 (instance->pdev->device
2781 == PCI_DEVICE_ID_LSI_FUSION) || 3148 == PCI_DEVICE_ID_LSI_FUSION) ||
2782 (instance->pdev->device 3149 (instance->pdev->device
3150 == PCI_DEVICE_ID_LSI_PLASMA) ||
3151 (instance->pdev->device
2783 == PCI_DEVICE_ID_LSI_INVADER) || 3152 == PCI_DEVICE_ID_LSI_INVADER) ||
2784 (instance->pdev->device 3153 (instance->pdev->device
2785 == PCI_DEVICE_ID_LSI_FURY)) { 3154 == PCI_DEVICE_ID_LSI_FURY)) {
@@ -2788,6 +3157,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2788 if ((instance->pdev->device == 3157 if ((instance->pdev->device ==
2789 PCI_DEVICE_ID_LSI_FUSION) || 3158 PCI_DEVICE_ID_LSI_FUSION) ||
2790 (instance->pdev->device == 3159 (instance->pdev->device ==
3160 PCI_DEVICE_ID_LSI_PLASMA) ||
3161 (instance->pdev->device ==
2791 PCI_DEVICE_ID_LSI_INVADER) || 3162 PCI_DEVICE_ID_LSI_INVADER) ||
2792 (instance->pdev->device == 3163 (instance->pdev->device ==
2793 PCI_DEVICE_ID_LSI_FURY)) { 3164 PCI_DEVICE_ID_LSI_FURY)) {
@@ -3014,6 +3385,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
3014 cmd->frame->io.context = cpu_to_le32(cmd->index); 3385 cmd->frame->io.context = cpu_to_le32(cmd->index);
3015 cmd->frame->io.pad_0 = 0; 3386 cmd->frame->io.pad_0 = 0;
3016 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && 3387 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
3388 (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
3017 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && 3389 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
3018 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && 3390 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
3019 (reset_devices)) 3391 (reset_devices))
@@ -3620,6 +3992,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3620 struct megasas_ctrl_info *ctrl_info; 3992 struct megasas_ctrl_info *ctrl_info;
3621 unsigned long bar_list; 3993 unsigned long bar_list;
3622 int i, loop, fw_msix_count = 0; 3994 int i, loop, fw_msix_count = 0;
3995 struct IOV_111 *iovPtr;
3623 3996
3624 /* Find first memory bar */ 3997 /* Find first memory bar */
3625 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 3998 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3642,6 +4015,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3642 4015
3643 switch (instance->pdev->device) { 4016 switch (instance->pdev->device) {
3644 case PCI_DEVICE_ID_LSI_FUSION: 4017 case PCI_DEVICE_ID_LSI_FUSION:
4018 case PCI_DEVICE_ID_LSI_PLASMA:
3645 case PCI_DEVICE_ID_LSI_INVADER: 4019 case PCI_DEVICE_ID_LSI_INVADER:
3646 case PCI_DEVICE_ID_LSI_FURY: 4020 case PCI_DEVICE_ID_LSI_FURY:
3647 instance->instancet = &megasas_instance_template_fusion; 4021 instance->instancet = &megasas_instance_template_fusion;
@@ -3696,7 +4070,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
3696 scratch_pad_2 = readl 4070 scratch_pad_2 = readl
3697 (&instance->reg_set->outbound_scratch_pad_2); 4071 (&instance->reg_set->outbound_scratch_pad_2);
3698 /* Check max MSI-X vectors */ 4072 /* Check max MSI-X vectors */
3699 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { 4073 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4074 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
3700 instance->msix_vectors = (scratch_pad_2 4075 instance->msix_vectors = (scratch_pad_2
3701 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 4076 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
3702 fw_msix_count = instance->msix_vectors; 4077 fw_msix_count = instance->msix_vectors;
@@ -3763,7 +4138,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
3763 4138
3764 memset(instance->pd_list, 0 , 4139 memset(instance->pd_list, 0 ,
3765 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 4140 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
3766 megasas_get_pd_list(instance); 4141 if (megasas_get_pd_list(instance) < 0) {
4142 printk(KERN_ERR "megasas: failed to get PD list\n");
4143 goto fail_init_adapter;
4144 }
3767 4145
3768 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4146 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
3769 if (megasas_ld_list_query(instance, 4147 if (megasas_ld_list_query(instance,
@@ -3807,6 +4185,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3807 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4185 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3808 /* adapterOperations2 are converted into CPU arch*/ 4186 /* adapterOperations2 are converted into CPU arch*/
3809 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4187 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4188 instance->mpio = ctrl_info->adapterOperations2.mpio;
3810 instance->UnevenSpanSupport = 4189 instance->UnevenSpanSupport =
3811 ctrl_info->adapterOperations2.supportUnevenSpans; 4190 ctrl_info->adapterOperations2.supportUnevenSpans;
3812 if (instance->UnevenSpanSupport) { 4191 if (instance->UnevenSpanSupport) {
@@ -3819,6 +4198,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
3819 fusion->fast_path_io = 0; 4198 fusion->fast_path_io = 0;
3820 4199
3821 } 4200 }
4201 if (ctrl_info->host_interface.SRIOV) {
4202 if (!ctrl_info->adapterOperations2.activePassive)
4203 instance->PlasmaFW111 = 1;
4204
4205 if (!instance->PlasmaFW111)
4206 instance->requestorId =
4207 ctrl_info->iov.requestorId;
4208 else {
4209 iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
4210 instance->requestorId = iovPtr->requestorId;
4211 }
4212 printk(KERN_WARNING "megaraid_sas: I am VF "
4213 "requestorId %d\n", instance->requestorId);
4214 }
3822 } 4215 }
3823 instance->max_sectors_per_req = instance->max_num_sge * 4216 instance->max_sectors_per_req = instance->max_num_sge *
3824 PAGE_SIZE / 512; 4217 PAGE_SIZE / 512;
@@ -3851,6 +4244,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
3851 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 4244 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
3852 (unsigned long)instance); 4245 (unsigned long)instance);
3853 4246
4247 /* Launch SR-IOV heartbeat timer */
4248 if (instance->requestorId) {
4249 if (!megasas_sriov_start_heartbeat(instance, 1))
4250 megasas_start_timer(instance,
4251 &instance->sriov_heartbeat_timer,
4252 megasas_sriov_heartbeat_handler,
4253 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
4254 else
4255 instance->skip_heartbeat_timer_del = 1;
4256 }
4257
3854 return 0; 4258 return 0;
3855 4259
3856fail_init_adapter: 4260fail_init_adapter:
@@ -3933,16 +4337,19 @@ megasas_get_seq_num(struct megasas_instance *instance,
3933 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); 4337 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
3934 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 4338 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
3935 4339
3936 megasas_issue_blocked_cmd(instance, cmd); 4340 if (megasas_issue_blocked_cmd(instance, cmd, 30))
3937 4341 dev_err(&instance->pdev->dev, "Command timed out"
3938 /* 4342 " from %s\n", __func__);
3939 * Copy the data back into callers buffer 4343 else {
3940 */ 4344 /*
3941 eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); 4345 * Copy the data back into callers buffer
3942 eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); 4346 */
3943 eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); 4347 eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
3944 eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); 4348 eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
3945 eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); 4349 eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
4350 eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
4351 eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
4352 }
3946 4353
3947 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 4354 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
3948 el_info, el_info_h); 4355 el_info, el_info_h);
@@ -4018,7 +4425,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
4018 instance->aen_cmd->abort_aen = 1; 4425 instance->aen_cmd->abort_aen = 1;
4019 ret_val = megasas_issue_blocked_abort_cmd(instance, 4426 ret_val = megasas_issue_blocked_abort_cmd(instance,
4020 instance-> 4427 instance->
4021 aen_cmd); 4428 aen_cmd, 30);
4022 4429
4023 if (ret_val) { 4430 if (ret_val) {
4024 printk(KERN_DEBUG "megasas: Failed to abort " 4431 printk(KERN_DEBUG "megasas: Failed to abort "
@@ -4160,6 +4567,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
4160 4567
4161 /* Fusion only supports host reset */ 4568 /* Fusion only supports host reset */
4162 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 4569 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4570 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
4163 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 4571 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
4164 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 4572 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
4165 host->hostt->eh_device_reset_handler = NULL; 4573 host->hostt->eh_device_reset_handler = NULL;
@@ -4197,6 +4605,19 @@ megasas_set_dma_mask(struct pci_dev *pdev)
4197 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4605 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4198 goto fail_set_dma_mask; 4606 goto fail_set_dma_mask;
4199 } 4607 }
4608 /*
4609 * Ensure that all data structures are allocated in 32-bit
4610 * memory.
4611 */
4612 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
4613 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
4614 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
4615 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
4616 dev_info(&pdev->dev, "set 32bit DMA mask"
 4617 " and 32 bit consistent mask\n");
4618 else
4619 goto fail_set_dma_mask;
4620 }
4200 4621
4201 return 0; 4622 return 0;
4202 4623
@@ -4212,7 +4633,7 @@ fail_set_dma_mask:
4212static int megasas_probe_one(struct pci_dev *pdev, 4633static int megasas_probe_one(struct pci_dev *pdev,
4213 const struct pci_device_id *id) 4634 const struct pci_device_id *id)
4214{ 4635{
4215 int rval, pos, i, j; 4636 int rval, pos, i, j, cpu;
4216 struct Scsi_Host *host; 4637 struct Scsi_Host *host;
4217 struct megasas_instance *instance; 4638 struct megasas_instance *instance;
4218 u16 control = 0; 4639 u16 control = 0;
@@ -4272,6 +4693,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
4272 4693
4273 switch (instance->pdev->device) { 4694 switch (instance->pdev->device) {
4274 case PCI_DEVICE_ID_LSI_FUSION: 4695 case PCI_DEVICE_ID_LSI_FUSION:
4696 case PCI_DEVICE_ID_LSI_PLASMA:
4275 case PCI_DEVICE_ID_LSI_INVADER: 4697 case PCI_DEVICE_ID_LSI_INVADER:
4276 case PCI_DEVICE_ID_LSI_FURY: 4698 case PCI_DEVICE_ID_LSI_FURY:
4277 { 4699 {
@@ -4368,6 +4790,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
4368 instance->UnevenSpanSupport = 0; 4790 instance->UnevenSpanSupport = 0;
4369 4791
4370 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 4792 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4793 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
4371 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 4794 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
4372 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 4795 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
4373 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 4796 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -4380,12 +4803,33 @@ static int megasas_probe_one(struct pci_dev *pdev,
4380 if (megasas_init_fw(instance)) 4803 if (megasas_init_fw(instance))
4381 goto fail_init_mfi; 4804 goto fail_init_mfi;
4382 4805
4806 if (instance->requestorId) {
4807 if (instance->PlasmaFW111) {
4808 instance->vf_affiliation_111 =
4809 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
4810 &instance->vf_affiliation_111_h);
4811 if (!instance->vf_affiliation_111)
4812 printk(KERN_WARNING "megasas: Can't allocate "
4813 "memory for VF affiliation buffer\n");
4814 } else {
4815 instance->vf_affiliation =
4816 pci_alloc_consistent(pdev,
4817 (MAX_LOGICAL_DRIVES + 1) *
4818 sizeof(struct MR_LD_VF_AFFILIATION),
4819 &instance->vf_affiliation_h);
4820 if (!instance->vf_affiliation)
4821 printk(KERN_WARNING "megasas: Can't allocate "
4822 "memory for VF affiliation buffer\n");
4823 }
4824 }
4825
4383retry_irq_register: 4826retry_irq_register:
4384 /* 4827 /*
4385 * Register IRQ 4828 * Register IRQ
4386 */ 4829 */
4387 if (instance->msix_vectors) { 4830 if (instance->msix_vectors) {
4388 for (i = 0 ; i < instance->msix_vectors; i++) { 4831 cpu = cpumask_first(cpu_online_mask);
4832 for (i = 0; i < instance->msix_vectors; i++) {
4389 instance->irq_context[i].instance = instance; 4833 instance->irq_context[i].instance = instance;
4390 instance->irq_context[i].MSIxIndex = i; 4834 instance->irq_context[i].MSIxIndex = i;
4391 if (request_irq(instance->msixentry[i].vector, 4835 if (request_irq(instance->msixentry[i].vector,
@@ -4394,14 +4838,22 @@ retry_irq_register:
4394 &instance->irq_context[i])) { 4838 &instance->irq_context[i])) {
4395 printk(KERN_DEBUG "megasas: Failed to " 4839 printk(KERN_DEBUG "megasas: Failed to "
4396 "register IRQ for vector %d.\n", i); 4840 "register IRQ for vector %d.\n", i);
4397 for (j = 0 ; j < i ; j++) 4841 for (j = 0; j < i; j++) {
4842 irq_set_affinity_hint(
4843 instance->msixentry[j].vector, NULL);
4398 free_irq( 4844 free_irq(
4399 instance->msixentry[j].vector, 4845 instance->msixentry[j].vector,
4400 &instance->irq_context[j]); 4846 &instance->irq_context[j]);
4847 }
4401 /* Retry irq register for IO_APIC */ 4848 /* Retry irq register for IO_APIC */
4402 instance->msix_vectors = 0; 4849 instance->msix_vectors = 0;
4403 goto retry_irq_register; 4850 goto retry_irq_register;
4404 } 4851 }
4852 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4853 get_cpu_mask(cpu)))
4854 dev_err(&instance->pdev->dev, "Error setting"
 4855 " affinity hint for cpu %d\n", cpu);
4856 cpu = cpumask_next(cpu, cpu_online_mask);
4405 } 4857 }
4406 } else { 4858 } else {
4407 instance->irq_context[0].instance = instance; 4859 instance->irq_context[0].instance = instance;
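
The probe path now spreads MSI-X vectors round-robin over the online CPUs via irq_set_affinity_hint(), walking cpu_online_mask with cpumask_first()/cpumask_next(). A sketch of that loop (note that cpumask_next() past the last online CPU returns a value >= nr_cpu_ids, so a configuration with more vectors than CPUs would want an explicit wrap back to cpumask_first()):

    int cpu = cpumask_first(cpu_online_mask);
    int i;

    for (i = 0; i < nvec; i++) {
            if (irq_set_affinity_hint(msixentry[i].vector,
                                      get_cpu_mask(cpu)))
                    dev_err(dev, "affinity hint failed for vector %d\n", i);
            cpu = cpumask_next(cpu, cpu_online_mask);
    }
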
@@ -4455,13 +4907,17 @@ retry_irq_register:
4455 4907
4456 instance->instancet->disable_intr(instance); 4908 instance->instancet->disable_intr(instance);
4457 if (instance->msix_vectors) 4909 if (instance->msix_vectors)
4458 for (i = 0 ; i < instance->msix_vectors; i++) 4910 for (i = 0; i < instance->msix_vectors; i++) {
4911 irq_set_affinity_hint(
4912 instance->msixentry[i].vector, NULL);
4459 free_irq(instance->msixentry[i].vector, 4913 free_irq(instance->msixentry[i].vector,
4460 &instance->irq_context[i]); 4914 &instance->irq_context[i]);
4915 }
4461 else 4916 else
4462 free_irq(instance->pdev->irq, &instance->irq_context[0]); 4917 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4463fail_irq: 4918fail_irq:
4464 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 4919 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4920 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
4465 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 4921 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
4466 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 4922 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
4467 megasas_release_fusion(instance); 4923 megasas_release_fusion(instance);
@@ -4522,7 +4978,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)
4522 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 4978 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
4523 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4979 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4524 4980
4525 megasas_issue_blocked_cmd(instance, cmd); 4981 if (megasas_issue_blocked_cmd(instance, cmd, 30))
 4982 dev_err(&instance->pdev->dev, "Command timed out"
4983 " from %s\n", __func__);
4526 4984
4527 megasas_return_cmd(instance, cmd); 4985 megasas_return_cmd(instance, cmd);
4528 4986
@@ -4549,10 +5007,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
4549 return; 5007 return;
4550 5008
4551 if (instance->aen_cmd) 5009 if (instance->aen_cmd)
4552 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); 5010 megasas_issue_blocked_abort_cmd(instance,
5011 instance->aen_cmd, 30);
4553 if (instance->map_update_cmd) 5012 if (instance->map_update_cmd)
4554 megasas_issue_blocked_abort_cmd(instance, 5013 megasas_issue_blocked_abort_cmd(instance,
4555 instance->map_update_cmd); 5014 instance->map_update_cmd, 30);
4556 dcmd = &cmd->frame->dcmd; 5015 dcmd = &cmd->frame->dcmd;
4557 5016
4558 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5017 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4566,7 +5025,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
4566 dcmd->data_xfer_len = 0; 5025 dcmd->data_xfer_len = 0;
4567 dcmd->opcode = cpu_to_le32(opcode); 5026 dcmd->opcode = cpu_to_le32(opcode);
4568 5027
4569 megasas_issue_blocked_cmd(instance, cmd); 5028 if (megasas_issue_blocked_cmd(instance, cmd, 30))
 5029 dev_err(&instance->pdev->dev, "Command timed out"
 5030 " from %s\n", __func__);
4570 5031
4571 megasas_return_cmd(instance, cmd); 5032 megasas_return_cmd(instance, cmd);
4572 5033
@@ -4590,6 +5051,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4590 host = instance->host; 5051 host = instance->host;
4591 instance->unload = 1; 5052 instance->unload = 1;
4592 5053
5054 /* Shutdown SR-IOV heartbeat timer */
5055 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
5056 del_timer_sync(&instance->sriov_heartbeat_timer);
5057
4593 megasas_flush_cache(instance); 5058 megasas_flush_cache(instance);
4594 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 5059 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
4595 5060
@@ -4606,9 +5071,12 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4606 instance->instancet->disable_intr(instance); 5071 instance->instancet->disable_intr(instance);
4607 5072
4608 if (instance->msix_vectors) 5073 if (instance->msix_vectors)
4609 for (i = 0 ; i < instance->msix_vectors; i++) 5074 for (i = 0; i < instance->msix_vectors; i++) {
5075 irq_set_affinity_hint(
5076 instance->msixentry[i].vector, NULL);
4610 free_irq(instance->msixentry[i].vector, 5077 free_irq(instance->msixentry[i].vector,
4611 &instance->irq_context[i]); 5078 &instance->irq_context[i]);
5079 }
4612 else 5080 else
4613 free_irq(instance->pdev->irq, &instance->irq_context[0]); 5081 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4614 if (instance->msix_vectors) 5082 if (instance->msix_vectors)
@@ -4629,7 +5097,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4629static int 5097static int
4630megasas_resume(struct pci_dev *pdev) 5098megasas_resume(struct pci_dev *pdev)
4631{ 5099{
4632 int rval, i, j; 5100 int rval, i, j, cpu;
4633 struct Scsi_Host *host; 5101 struct Scsi_Host *host;
4634 struct megasas_instance *instance; 5102 struct megasas_instance *instance;
4635 5103
@@ -4673,6 +5141,7 @@ megasas_resume(struct pci_dev *pdev)
4673 5141
4674 switch (instance->pdev->device) { 5142 switch (instance->pdev->device) {
4675 case PCI_DEVICE_ID_LSI_FUSION: 5143 case PCI_DEVICE_ID_LSI_FUSION:
5144 case PCI_DEVICE_ID_LSI_PLASMA:
4676 case PCI_DEVICE_ID_LSI_INVADER: 5145 case PCI_DEVICE_ID_LSI_INVADER:
4677 case PCI_DEVICE_ID_LSI_FURY: 5146 case PCI_DEVICE_ID_LSI_FURY:
4678 { 5147 {
@@ -4701,6 +5170,7 @@ megasas_resume(struct pci_dev *pdev)
4701 * Register IRQ 5170 * Register IRQ
4702 */ 5171 */
4703 if (instance->msix_vectors) { 5172 if (instance->msix_vectors) {
5173 cpu = cpumask_first(cpu_online_mask);
4704 for (i = 0 ; i < instance->msix_vectors; i++) { 5174 for (i = 0 ; i < instance->msix_vectors; i++) {
4705 instance->irq_context[i].instance = instance; 5175 instance->irq_context[i].instance = instance;
4706 instance->irq_context[i].MSIxIndex = i; 5176 instance->irq_context[i].MSIxIndex = i;
@@ -4710,12 +5180,21 @@ megasas_resume(struct pci_dev *pdev)
4710 &instance->irq_context[i])) { 5180 &instance->irq_context[i])) {
4711 printk(KERN_DEBUG "megasas: Failed to " 5181 printk(KERN_DEBUG "megasas: Failed to "
4712 "register IRQ for vector %d.\n", i); 5182 "register IRQ for vector %d.\n", i);
4713 for (j = 0 ; j < i ; j++) 5183 for (j = 0; j < i; j++) {
5184 irq_set_affinity_hint(
5185 instance->msixentry[j].vector, NULL);
4714 free_irq( 5186 free_irq(
4715 instance->msixentry[j].vector, 5187 instance->msixentry[j].vector,
4716 &instance->irq_context[j]); 5188 &instance->irq_context[j]);
5189 }
4717 goto fail_irq; 5190 goto fail_irq;
4718 } 5191 }
5192
5193 if (irq_set_affinity_hint(instance->msixentry[i].vector,
5194 get_cpu_mask(cpu)))
5195 dev_err(&instance->pdev->dev, "Error setting"
 5196 " affinity hint for cpu %d\n", cpu);
5197 cpu = cpumask_next(cpu, cpu_online_mask);
4719 } 5198 }
4720 } else { 5199 } else {
4721 instance->irq_context[0].instance = instance; 5200 instance->irq_context[0].instance = instance;
@@ -4728,6 +5207,17 @@ megasas_resume(struct pci_dev *pdev)
4728 } 5207 }
4729 } 5208 }
4730 5209
5210 /* Re-launch SR-IOV heartbeat timer */
5211 if (instance->requestorId) {
5212 if (!megasas_sriov_start_heartbeat(instance, 0))
5213 megasas_start_timer(instance,
5214 &instance->sriov_heartbeat_timer,
5215 megasas_sriov_heartbeat_handler,
5216 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5217 else
5218 instance->skip_heartbeat_timer_del = 1;
5219 }
5220
4731 instance->instancet->enable_intr(instance); 5221 instance->instancet->enable_intr(instance);
4732 instance->unload = 0; 5222 instance->unload = 0;
4733 5223
@@ -4782,6 +5272,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
4782 host = instance->host; 5272 host = instance->host;
4783 fusion = instance->ctrl_context; 5273 fusion = instance->ctrl_context;
4784 5274
5275 /* Shutdown SR-IOV heartbeat timer */
5276 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
5277 del_timer_sync(&instance->sriov_heartbeat_timer);
5278
4785 scsi_remove_host(instance->host); 5279 scsi_remove_host(instance->host);
4786 megasas_flush_cache(instance); 5280 megasas_flush_cache(instance);
4787 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 5281 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4793,6 +5287,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
4793 instance->ev = NULL; 5287 instance->ev = NULL;
4794 } 5288 }
4795 5289
5290 /* cancel all wait events */
5291 wake_up_all(&instance->int_cmd_wait_q);
5292
4796 tasklet_kill(&instance->isr_tasklet); 5293 tasklet_kill(&instance->isr_tasklet);
4797 5294
4798 /* 5295 /*
@@ -4811,9 +5308,12 @@ static void megasas_detach_one(struct pci_dev *pdev)
4811 instance->instancet->disable_intr(instance); 5308 instance->instancet->disable_intr(instance);
4812 5309
4813 if (instance->msix_vectors) 5310 if (instance->msix_vectors)
4814 for (i = 0 ; i < instance->msix_vectors; i++) 5311 for (i = 0; i < instance->msix_vectors; i++) {
5312 irq_set_affinity_hint(
5313 instance->msixentry[i].vector, NULL);
4815 free_irq(instance->msixentry[i].vector, 5314 free_irq(instance->msixentry[i].vector,
4816 &instance->irq_context[i]); 5315 &instance->irq_context[i]);
5316 }
4817 else 5317 else
4818 free_irq(instance->pdev->irq, &instance->irq_context[0]); 5318 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4819 if (instance->msix_vectors) 5319 if (instance->msix_vectors)
@@ -4821,6 +5321,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
4821 5321
4822 switch (instance->pdev->device) { 5322 switch (instance->pdev->device) {
4823 case PCI_DEVICE_ID_LSI_FUSION: 5323 case PCI_DEVICE_ID_LSI_FUSION:
5324 case PCI_DEVICE_ID_LSI_PLASMA:
4824 case PCI_DEVICE_ID_LSI_INVADER: 5325 case PCI_DEVICE_ID_LSI_INVADER:
4825 case PCI_DEVICE_ID_LSI_FURY: 5326 case PCI_DEVICE_ID_LSI_FURY:
4826 megasas_release_fusion(instance); 5327 megasas_release_fusion(instance);
@@ -4847,6 +5348,24 @@ static void megasas_detach_one(struct pci_dev *pdev)
4847 if (instance->evt_detail) 5348 if (instance->evt_detail)
4848 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5349 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
4849 instance->evt_detail, instance->evt_detail_h); 5350 instance->evt_detail, instance->evt_detail_h);
5351
5352 if (instance->vf_affiliation)
5353 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
5354 sizeof(struct MR_LD_VF_AFFILIATION),
5355 instance->vf_affiliation,
5356 instance->vf_affiliation_h);
5357
5358 if (instance->vf_affiliation_111)
5359 pci_free_consistent(pdev,
5360 sizeof(struct MR_LD_VF_AFFILIATION_111),
5361 instance->vf_affiliation_111,
5362 instance->vf_affiliation_111_h);
5363
5364 if (instance->hb_host_mem)
5365 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
5366 instance->hb_host_mem,
5367 instance->hb_host_mem_h);
5368
4850 scsi_host_put(host); 5369 scsi_host_put(host);
4851 5370
4852 pci_disable_device(pdev); 5371 pci_disable_device(pdev);
@@ -4868,9 +5387,12 @@ static void megasas_shutdown(struct pci_dev *pdev)
4868 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 5387 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
4869 instance->instancet->disable_intr(instance); 5388 instance->instancet->disable_intr(instance);
4870 if (instance->msix_vectors) 5389 if (instance->msix_vectors)
4871 for (i = 0 ; i < instance->msix_vectors; i++) 5390 for (i = 0; i < instance->msix_vectors; i++) {
5391 irq_set_affinity_hint(
5392 instance->msixentry[i].vector, NULL);
4872 free_irq(instance->msixentry[i].vector, 5393 free_irq(instance->msixentry[i].vector,
4873 &instance->irq_context[i]); 5394 &instance->irq_context[i]);
5395 }
4874 else 5396 else
4875 free_irq(instance->pdev->irq, &instance->irq_context[0]); 5397 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4876 if (instance->msix_vectors) 5398 if (instance->msix_vectors)
@@ -5045,7 +5567,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5045 * cmd to the SCSI mid-layer 5567 * cmd to the SCSI mid-layer
5046 */ 5568 */
5047 cmd->sync_cmd = 1; 5569 cmd->sync_cmd = 1;
5048 megasas_issue_blocked_cmd(instance, cmd); 5570 megasas_issue_blocked_cmd(instance, cmd, 0);
5049 cmd->sync_cmd = 0; 5571 cmd->sync_cmd = 0;
5050 5572
5051 /* 5573 /*
@@ -5132,6 +5654,16 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
5132 goto out_kfree_ioc; 5654 goto out_kfree_ioc;
5133 } 5655 }
5134 5656
5657 /* Adjust ioctl wait time for VF mode */
5658 if (instance->requestorId)
5659 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
5660
5661 /* Block ioctls in VF mode */
5662 if (instance->requestorId && !allow_vf_ioctls) {
5663 error = -ENODEV;
5664 goto out_kfree_ioc;
5665 }
5666
5135 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 5667 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
5136 printk(KERN_ERR "Controller in crit error\n"); 5668 printk(KERN_ERR "Controller in crit error\n");
5137 error = -ENODEV; 5669 error = -ENODEV;
@@ -5441,7 +5973,7 @@ megasas_aen_polling(struct work_struct *work)
5441 u16 pd_index = 0; 5973 u16 pd_index = 0;
5442 u16 ld_index = 0; 5974 u16 ld_index = 0;
5443 int i, j, doscan = 0; 5975 int i, j, doscan = 0;
5444 u32 seq_num; 5976 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
5445 int error; 5977 int error;
5446 5978
5447 if (!instance) { 5979 if (!instance) {
@@ -5449,6 +5981,23 @@ megasas_aen_polling(struct work_struct *work)
5449 kfree(ev); 5981 kfree(ev);
5450 return; 5982 return;
5451 } 5983 }
5984
5985 /* Adjust event workqueue thread wait time for VF mode */
5986 if (instance->requestorId)
5987 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
5988
5989 /* Don't run the event workqueue thread if OCR is running */
5990 for (i = 0; i < wait_time; i++) {
5991 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
5992 break;
5993 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
5994 printk(KERN_NOTICE "megasas: %s waiting for "
5995 "controller reset to finish for scsi%d\n",
5996 __func__, instance->host->host_no);
5997 }
5998 msleep(1000);
5999 }
6000
5452 instance->ev = NULL; 6001 instance->ev = NULL;
5453 host = instance->host; 6002 host = instance->host;
5454 if (instance->evt_detail) { 6003 if (instance->evt_detail) {
@@ -5515,65 +6064,64 @@ megasas_aen_polling(struct work_struct *work)
5515 case MR_EVT_LD_OFFLINE: 6064 case MR_EVT_LD_OFFLINE:
5516 case MR_EVT_CFG_CLEARED: 6065 case MR_EVT_CFG_CLEARED:
5517 case MR_EVT_LD_DELETED: 6066 case MR_EVT_LD_DELETED:
5518 if (megasas_ld_list_query(instance, 6067 if (!instance->requestorId ||
5519 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 6068 (instance->requestorId &&
5520 megasas_get_ld_list(instance); 6069 megasas_get_ld_vf_affiliation(instance, 0))) {
5521 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 6070 if (megasas_ld_list_query(instance,
5522 for (j = 0; 6071 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5523 j < MEGASAS_MAX_DEV_PER_CHANNEL; 6072 megasas_get_ld_list(instance);
5524 j++) { 6073 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
5525 6074 for (j = 0;
5526 ld_index = 6075 j < MEGASAS_MAX_DEV_PER_CHANNEL;
5527 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 6076 j++) {
5528 6077
5529 sdev1 = scsi_device_lookup(host, 6078 ld_index =
5530 MEGASAS_MAX_PD_CHANNELS + i, 6079 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5531 j, 6080
5532 0); 6081 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5533 6082
5534 if (instance->ld_ids[ld_index] != 0xff) { 6083 if (instance->ld_ids[ld_index]
5535 if (sdev1) { 6084 != 0xff) {
5536 scsi_device_put(sdev1); 6085 if (sdev1)
5537 } 6086 scsi_device_put(sdev1);
5538 } else { 6087 } else {
5539 if (sdev1) { 6088 if (sdev1) {
5540 scsi_remove_device(sdev1); 6089 scsi_remove_device(sdev1);
5541 scsi_device_put(sdev1); 6090 scsi_device_put(sdev1);
6091 }
6092 }
5542 } 6093 }
5543 } 6094 }
5544 } 6095 doscan = 0;
5545 } 6096 }
5546 doscan = 0;
5547 break; 6097 break;
5548 case MR_EVT_LD_CREATED: 6098 case MR_EVT_LD_CREATED:
5549 if (megasas_ld_list_query(instance, 6099 if (!instance->requestorId ||
5550 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 6100 (instance->requestorId &&
5551 megasas_get_ld_list(instance); 6101 megasas_get_ld_vf_affiliation(instance, 0))) {
5552 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 6102 if (megasas_ld_list_query(instance,
5553 for (j = 0; 6103 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5554 j < MEGASAS_MAX_DEV_PER_CHANNEL; 6104 megasas_get_ld_list(instance);
5555 j++) { 6105 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
5556 ld_index = 6106 for (j = 0;
5557 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 6107 j < MEGASAS_MAX_DEV_PER_CHANNEL;
5558 6108 j++) {
5559 sdev1 = scsi_device_lookup(host, 6109 ld_index =
5560 MEGASAS_MAX_PD_CHANNELS + i, 6110 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5561 j, 0); 6111
5562 6112 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5563 if (instance->ld_ids[ld_index] != 6113
5564 0xff) { 6114 if (instance->ld_ids[ld_index]
5565 if (!sdev1) { 6115 != 0xff) {
5566 scsi_add_device(host, 6116 if (!sdev1)
5567 MEGASAS_MAX_PD_CHANNELS + i, 6117 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5568 j, 0);
5569 } 6118 }
5570 } 6119 if (sdev1)
5571 if (sdev1) { 6120 scsi_device_put(sdev1);
5572 scsi_device_put(sdev1);
5573 } 6121 }
5574 } 6122 }
6123 doscan = 0;
5575 } 6124 }
5576 doscan = 0;
5577 break; 6125 break;
5578 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 6126 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
5579 case MR_EVT_FOREIGN_CFG_IMPORTED: 6127 case MR_EVT_FOREIGN_CFG_IMPORTED:
@@ -5591,50 +6139,55 @@ megasas_aen_polling(struct work_struct *work)
5591 } 6139 }
5592 6140
5593 if (doscan) { 6141 if (doscan) {
5594 printk(KERN_INFO "scanning ...\n"); 6142 printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
5595 megasas_get_pd_list(instance); 6143 instance->host->host_no);
5596 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 6144 if (megasas_get_pd_list(instance) == 0) {
5597 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 6145 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
5598 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 6146 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
5599 sdev1 = scsi_device_lookup(host, i, j, 0); 6147 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
5600 if (instance->pd_list[pd_index].driveState == 6148 sdev1 = scsi_device_lookup(host, i, j, 0);
5601 MR_PD_STATE_SYSTEM) { 6149 if (instance->pd_list[pd_index].driveState ==
5602 if (!sdev1) { 6150 MR_PD_STATE_SYSTEM) {
5603 scsi_add_device(host, i, j, 0); 6151 if (!sdev1) {
5604 } 6152 scsi_add_device(host, i, j, 0);
5605 if (sdev1) 6153 }
5606 scsi_device_put(sdev1); 6154 if (sdev1)
5607 } else { 6155 scsi_device_put(sdev1);
5608 if (sdev1) { 6156 } else {
5609 scsi_remove_device(sdev1); 6157 if (sdev1) {
5610 scsi_device_put(sdev1); 6158 scsi_remove_device(sdev1);
6159 scsi_device_put(sdev1);
6160 }
5611 } 6161 }
5612 } 6162 }
5613 } 6163 }
5614 } 6164 }
5615 6165
5616 if (megasas_ld_list_query(instance, 6166 if (!instance->requestorId ||
5617 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 6167 (instance->requestorId &&
5618 megasas_get_ld_list(instance); 6168 megasas_get_ld_vf_affiliation(instance, 0))) {
5619 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 6169 if (megasas_ld_list_query(instance,
5620 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 6170 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5621 ld_index = 6171 megasas_get_ld_list(instance);
5622 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 6172 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
6173 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
6174 j++) {
6175 ld_index =
6176 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5623 6177
5624 sdev1 = scsi_device_lookup(host, 6178 sdev1 = scsi_device_lookup(host,
5625 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 6179 MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5626 if (instance->ld_ids[ld_index] != 0xff) { 6180 if (instance->ld_ids[ld_index]
5627 if (!sdev1) { 6181 != 0xff) {
5628 scsi_add_device(host, 6182 if (!sdev1)
5629 MEGASAS_MAX_PD_CHANNELS + i, 6183 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5630 j, 0); 6184 else
6185 scsi_device_put(sdev1);
5631 } else { 6186 } else {
5632 scsi_device_put(sdev1); 6187 if (sdev1) {
5633 } 6188 scsi_remove_device(sdev1);
5634 } else { 6189 scsi_device_put(sdev1);
5635 if (sdev1) { 6190 }
5636 scsi_remove_device(sdev1);
5637 scsi_device_put(sdev1);
5638 } 6191 }
5639 } 6192 }
5640 } 6193 }
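
The base-driver hunks above teach the AEN worker to hold off while an online controller reset (OCR) is in flight: it polls adprecovery once a second, prints a notice periodically, and SR-IOV VFs get a different wait budget. A stand-alone model of that gate, with hypothetical names and budgets standing in for the driver's constants:

    #include <stdio.h>
    #include <unistd.h>

    #define HBA_OPERATIONAL       0
    #define RESET_WAIT_TIME       180 /* stand-in PF budget, seconds */
    #define ROUTINE_WAIT_TIME_VF  300 /* stand-in VF budget, seconds */
    #define RESET_NOTICE_INTERVAL 30  /* log every 30 polls */

    static volatile int adprecovery = HBA_OPERATIONAL; /* set by the reset path */

    static int wait_for_ocr(int is_vf)
    {
        int budget = is_vf ? ROUTINE_WAIT_TIME_VF : RESET_WAIT_TIME;
        int i;

        for (i = 0; i < budget; i++) {
            if (adprecovery == HBA_OPERATIONAL)
                return 0; /* safe to rescan devices */
            if (!(i % RESET_NOTICE_INTERVAL))
                printf("waiting for controller reset to finish\n");
            sleep(1);
        }
        return -1; /* reset never finished; skip the scan */
    }

    int main(void)
    {
        return wait_for_ocr(0) ? 1 : 0;
    }
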
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e24b6eb645b5..081bfff12d00 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -143,12 +143,12 @@ u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
143 143
144 u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) 144 u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
145 { 145 {
146 return map->raidMap.ldSpanMap[ld].ldRaid.targetId; 146 return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
147 } 147 }
148 148
149 u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) 149 u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
150 { 150 {
151 return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]); 151 return map->raidMap.ldTgtIdToLd[ldTgtId];
152 } 152 }
153 153
154 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, 154 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -975,7 +975,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
975 regSize += stripSize; 975 regSize += stripSize;
976 } 976 }
977 977
978 pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec); 978 pRAID_Context->timeoutValue =
979 cpu_to_le16(raid->fpIoTimeoutForLd ?
980 raid->fpIoTimeoutForLd :
981 map->raidMap.fpPdIoTimeoutSec);
979 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 982 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
980 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 983 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
981 pRAID_Context->regLockFlags = (isRead) ? 984 pRAID_Context->regLockFlags = (isRead) ?
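
The timeout hunk above prefers a per-LD fast-path timeout and only falls back to the controller-wide PD value when the LD field is zero (the result is then converted with cpu_to_le16() once). The selection logic in isolation, using stand-in structs for the two raid-map fields:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins for the relevant raid-map fields */
    struct ld_raid  { uint16_t fpIoTimeoutForLd; };
    struct raid_map { uint16_t fpPdIoTimeoutSec; };

    static uint16_t pick_fp_timeout(const struct ld_raid *raid,
                                    const struct raid_map *map)
    {
        /* a per-LD value wins; 0 means "not set", so fall back */
        return raid->fpIoTimeoutForLd ? raid->fpIoTimeoutForLd
                                      : map->fpPdIoTimeoutSec;
    }

    int main(void)
    {
        struct ld_raid  ld  = { .fpIoTimeoutForLd = 0 };
        struct raid_map map = { .fpPdIoTimeoutSec = 10 };

        printf("timeout=%u\n", (unsigned)pick_fp_timeout(&ld, &map)); /* 10 */
        ld.fpIoTimeoutForLd = 4;
        printf("timeout=%u\n", (unsigned)pick_fp_timeout(&ld, &map)); /* 4 */
        return 0;
    }
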
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f6555921fd7a..22600419ae9f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -62,7 +62,8 @@ megasas_complete_cmd(struct megasas_instance *instance,
62 struct megasas_cmd *cmd, u8 alt_status); 62 struct megasas_cmd *cmd, u8 alt_status);
63 int megasas_is_ldio(struct scsi_cmnd *cmd); 63 int megasas_is_ldio(struct scsi_cmnd *cmd);
64 int 64 int
65 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); 65 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
66 int seconds);
66 67
67 void 68 void
68 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); 69 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
@@ -81,6 +82,13 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
81 void megaraid_sas_kill_hba(struct megasas_instance *instance); 82 void megaraid_sas_kill_hba(struct megasas_instance *instance);
82 83
83 extern u32 megasas_dbg_lvl; 84 extern u32 megasas_dbg_lvl;
85 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
86 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
87 int initial);
88 void megasas_start_timer(struct megasas_instance *instance,
89 struct timer_list *timer,
90 void *fn, unsigned long interval);
91 extern struct megasas_mgmt_info megasas_mgmt_info;
84 extern int resetwaittime; 92 extern int resetwaittime;
85 93
86 /** 94 /**
@@ -549,12 +557,13 @@ fail_req_desc:
549 * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 557 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
550 */ 558 */
551 int 559 int
552 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd) 560 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
561 int seconds)
553 { 562 {
554 int i; 563 int i;
555 struct megasas_header *frame_hdr = &cmd->frame->hdr; 564 struct megasas_header *frame_hdr = &cmd->frame->hdr;
556 565
557 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; 566 u32 msecs = seconds * 1000;
558 567
559 /* 568 /*
560 * Wait for cmd_status to change 569 * Wait for cmd_status to change
@@ -585,7 +594,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
585 struct megasas_cmd *cmd; 594 struct megasas_cmd *cmd;
586 u8 ret; 595 u8 ret;
587 struct fusion_context *fusion; 596 struct fusion_context *fusion;
588 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 597 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
589 int i; 598 int i;
590 struct megasas_header *frame_hdr; 599 struct megasas_header *frame_hdr;
591 600
@@ -644,18 +653,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
644 /* Convert capability to LE32 */ 653 /* Convert capability to LE32 */
645 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 654 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
646 655
647 init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle); 656 init_frame->queue_info_new_phys_addr_hi =
657 cpu_to_le32(upper_32_bits(ioc_init_handle));
658 init_frame->queue_info_new_phys_addr_lo =
659 cpu_to_le32(lower_32_bits(ioc_init_handle));
648 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); 660 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
649 661
650 req_desc = 662 req_desc.Words = 0;
651 (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; 663 req_desc.MFAIo.RequestFlags =
652
653 req_desc->Words = 0;
654 req_desc->MFAIo.RequestFlags =
655 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << 664 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
656 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 665 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
657 cpu_to_le32s((u32 *)&req_desc->MFAIo); 666 cpu_to_le32s((u32 *)&req_desc.MFAIo);
658 req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr); 667 req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
659 668
660 /* 669 /*
661 * disable the intr before firing the init frame 670 * disable the intr before firing the init frame
@@ -669,10 +678,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
669 break; 678 break;
670 } 679 }
671 680
672 instance->instancet->fire_cmd(instance, req_desc->u.low, 681 instance->instancet->fire_cmd(instance, req_desc.u.low,
673 req_desc->u.high, instance->reg_set); 682 req_desc.u.high, instance->reg_set);
674 683
675 wait_and_poll(instance, cmd); 684 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
676 685
677 frame_hdr = &cmd->frame->hdr; 686 frame_hdr = &cmd->frame->hdr;
678 if (frame_hdr->cmd_status != 0) { 687 if (frame_hdr->cmd_status != 0) {
@@ -723,7 +732,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
723 732
724 if (!fusion) { 733 if (!fusion) {
725 megasas_return_cmd(instance, cmd); 734 megasas_return_cmd(instance, cmd);
726 return 1; 735 return -ENXIO;
727 } 736 }
728 737
729 dcmd = &cmd->frame->dcmd; 738 dcmd = &cmd->frame->dcmd;
@@ -1604,13 +1613,15 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1604 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 1613 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1605 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1614 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
1606 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 1615 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
1607 io_request->IoFlags |= 1616 io_request->IoFlags |= cpu_to_le16(
1608 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 1617 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1609 cmd->request_desc->SCSIIO.RequestFlags = 1618 cmd->request_desc->SCSIIO.RequestFlags =
1610 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1619 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1611 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1620 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1612 cmd->request_desc->SCSIIO.DevHandle = 1621 cmd->request_desc->SCSIIO.DevHandle =
1613 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1622 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1623 cmd->request_desc->SCSIIO.MSIxIndex =
1624 instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
1614 /* 1625 /*
1615 * If the command is for the tape device, set the 1626 * If the command is for the tape device, set the
1616 * FP timeout to the os layer timeout value. 1627 * FP timeout to the os layer timeout value.
@@ -1770,7 +1781,8 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
1770 1781
1771 if (index >= instance->max_fw_cmds) { 1782 if (index >= instance->max_fw_cmds) {
1772 printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for " 1783 printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
1773 "descriptor\n", index); 1784 "descriptor for scsi%d\n", index,
1785 instance->host->host_no);
1774 return NULL; 1786 return NULL;
1775 } 1787 }
1776 fusion = instance->ctrl_context; 1788 fusion = instance->ctrl_context;
@@ -2038,8 +2050,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2038 /* If we didn't complete any commands, check for FW fault */ 2050 /* If we didn't complete any commands, check for FW fault */
2039 fw_state = instance->instancet->read_fw_status_reg( 2051 fw_state = instance->instancet->read_fw_status_reg(
2040 instance->reg_set) & MFI_STATE_MASK; 2052 instance->reg_set) & MFI_STATE_MASK;
2041 if (fw_state == MFI_STATE_FAULT) 2053 if (fw_state == MFI_STATE_FAULT) {
2054 printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
2055 "for scsi%d\n", instance->host->host_no);
2042 schedule_work(&instance->work_init); 2056 schedule_work(&instance->work_init);
2057 }
2043 } 2058 }
2044 2059
2045 return IRQ_HANDLED; 2060 return IRQ_HANDLED;
@@ -2210,9 +2225,10 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
2210 } 2225 }
2211 2226
2212 /* This function waits for outstanding commands on fusion to complete */ 2227 /* This function waits for outstanding commands on fusion to complete */
2213 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance) 2228 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2229 int iotimeout, int *convert)
2214 { 2230 {
2215 int i, outstanding, retval = 0; 2231 int i, outstanding, retval = 0, hb_seconds_missed = 0;
2216 u32 fw_state; 2232 u32 fw_state;
2217 2233
2218 for (i = 0; i < resetwaittime; i++) { 2234 for (i = 0; i < resetwaittime; i++) {
@@ -2221,18 +2237,49 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
2221 instance->reg_set) & MFI_STATE_MASK; 2237 instance->reg_set) & MFI_STATE_MASK;
2222 if (fw_state == MFI_STATE_FAULT) { 2238 if (fw_state == MFI_STATE_FAULT) {
2223 printk(KERN_WARNING "megasas: Found FW in FAULT state," 2239 printk(KERN_WARNING "megasas: Found FW in FAULT state,"
2224 " will reset adapter.\n"); 2240 " will reset adapter scsi%d.\n",
2241 instance->host->host_no);
2242 retval = 1;
2243 goto out;
2244 }
2245 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
2246 if (instance->requestorId && !iotimeout) {
2225 retval = 1; 2247 retval = 1;
2226 goto out; 2248 goto out;
2227 } 2249 }
2228 2250
2251 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2252 if (instance->requestorId && iotimeout) {
2253 if (instance->hb_host_mem->HB.fwCounter !=
2254 instance->hb_host_mem->HB.driverCounter) {
2255 instance->hb_host_mem->HB.driverCounter =
2256 instance->hb_host_mem->HB.fwCounter;
2257 hb_seconds_missed = 0;
2258 } else {
2259 hb_seconds_missed++;
2260 if (hb_seconds_missed ==
2261 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2262 printk(KERN_WARNING "megasas: SR-IOV:"
2263 " Heartbeat never completed "
2264 " while polling during I/O "
2265 " timeout handling for "
2266 "scsi%d.\n",
2267 instance->host->host_no);
2268 *convert = 1;
2269 retval = 1;
2270 goto out;
2271 }
2272 }
2273 }
2274
2229 outstanding = atomic_read(&instance->fw_outstanding); 2275 outstanding = atomic_read(&instance->fw_outstanding);
2230 if (!outstanding) 2276 if (!outstanding)
2231 goto out; 2277 goto out;
2232 2278
2233 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2279 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2234 printk(KERN_NOTICE "megasas: [%2d]waiting for %d " 2280 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
2235 "commands to complete\n", i, outstanding); 2281 "commands to complete for scsi%d\n", i,
2282 outstanding, instance->host->host_no);
2236 megasas_complete_cmd_dpc_fusion( 2283 megasas_complete_cmd_dpc_fusion(
2237 (unsigned long)instance); 2284 (unsigned long)instance);
2238 } 2285 }
@@ -2241,7 +2288,8 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
2241 2288
2242 if (atomic_read(&instance->fw_outstanding)) { 2289 if (atomic_read(&instance->fw_outstanding)) {
2243 printk("megaraid_sas: pending commands remain after waiting, " 2290 printk("megaraid_sas: pending commands remain after waiting, "
2244 "will reset adapter.\n"); 2291 "will reset adapter scsi%d.\n",
2292 instance->host->host_no);
2245 retval = 1; 2293 retval = 1;
2246 } 2294 }
2247 out: 2295 out:
@@ -2263,10 +2311,34 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
2263 reply_desc->Words = ULLONG_MAX; 2311 reply_desc->Words = ULLONG_MAX;
2264 } 2312 }
2265 2313
2314 /* Check for a second path that is currently UP */
2315 int megasas_check_mpio_paths(struct megasas_instance *instance,
2316 struct scsi_cmnd *scmd)
2317 {
2318 int i, j, retval = (DID_RESET << 16);
2319
2320 if (instance->mpio && instance->requestorId) {
2321 for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
2322 for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
2323 if (megasas_mgmt_info.instance[i] &&
2324 (megasas_mgmt_info.instance[i] != instance) &&
2325 megasas_mgmt_info.instance[i]->mpio &&
2326 megasas_mgmt_info.instance[i]->requestorId
2327 &&
2328 (megasas_mgmt_info.instance[i]->ld_ids[j]
2329 == scmd->device->id)) {
2330 retval = (DID_NO_CONNECT << 16);
2331 goto out;
2332 }
2333 }
2334 out:
2335 return retval;
2336 }
2337
2266 /* Core fusion reset function */ 2338 /* Core fusion reset function */
2267 int megasas_reset_fusion(struct Scsi_Host *shost) 2339 int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2268 { 2340 {
2269 int retval = SUCCESS, i, j, retry = 0; 2341 int retval = SUCCESS, i, j, retry = 0, convert = 0;
2270 struct megasas_instance *instance; 2342 struct megasas_instance *instance;
2271 struct megasas_cmd_fusion *cmd_fusion; 2343 struct megasas_cmd_fusion *cmd_fusion;
2272 struct fusion_context *fusion; 2344 struct fusion_context *fusion;
@@ -2277,28 +2349,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2277 instance = (struct megasas_instance *)shost->hostdata; 2349 instance = (struct megasas_instance *)shost->hostdata;
2278 fusion = instance->ctrl_context; 2350 fusion = instance->ctrl_context;
2279 2351
2352 mutex_lock(&instance->reset_mutex);
2353
2280 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2354 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2281 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2355 printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
2282 "returning FAILED.\n"); 2356 "returning FAILED for scsi%d.\n",
2357 instance->host->host_no);
2283 return FAILED; 2358 return FAILED;
2284 } 2359 }
2285 2360
2286 mutex_lock(&instance->reset_mutex); 2361 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
2362 del_timer_sync(&instance->sriov_heartbeat_timer);
2287 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 2363 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2288 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 2364 instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
2289 instance->instancet->disable_intr(instance); 2365 instance->instancet->disable_intr(instance);
2290 msleep(1000); 2366 msleep(1000);
2291 2367
2292 /* First try waiting for commands to complete */ 2368 /* First try waiting for commands to complete */
2293 if (megasas_wait_for_outstanding_fusion(instance)) { 2369 if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
2370 &convert)) {
2371 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2294 printk(KERN_WARNING "megaraid_sas: resetting fusion " 2372 printk(KERN_WARNING "megaraid_sas: resetting fusion "
2295 "adapter.\n"); 2373 "adapter scsi%d.\n", instance->host->host_no);
2374 if (convert)
2375 iotimeout = 0;
2376
2296 /* Now return commands back to the OS */ 2377 /* Now return commands back to the OS */
2297 for (i = 0 ; i < instance->max_fw_cmds; i++) { 2378 for (i = 0 ; i < instance->max_fw_cmds; i++) {
2298 cmd_fusion = fusion->cmd_list[i]; 2379 cmd_fusion = fusion->cmd_list[i];
2299 if (cmd_fusion->scmd) { 2380 if (cmd_fusion->scmd) {
2300 scsi_dma_unmap(cmd_fusion->scmd); 2381 scsi_dma_unmap(cmd_fusion->scmd);
2301 cmd_fusion->scmd->result = (DID_RESET << 16); 2382 cmd_fusion->scmd->result =
2383 megasas_check_mpio_paths(instance,
2384 cmd_fusion->scmd);
2302 cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); 2385 cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
2303 megasas_return_cmd_fusion(instance, cmd_fusion); 2386 megasas_return_cmd_fusion(instance, cmd_fusion);
2304 atomic_dec(&instance->fw_outstanding); 2387 atomic_dec(&instance->fw_outstanding);
@@ -2313,13 +2396,67 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2313 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 2396 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2314 /* Reset not supported, kill adapter */ 2397 /* Reset not supported, kill adapter */
2315 printk(KERN_WARNING "megaraid_sas: Reset not supported" 2398 printk(KERN_WARNING "megaraid_sas: Reset not supported"
2316 ", killing adapter.\n"); 2399 ", killing adapter scsi%d.\n",
2400 instance->host->host_no);
2317 megaraid_sas_kill_hba(instance); 2401 megaraid_sas_kill_hba(instance);
2402 instance->skip_heartbeat_timer_del = 1;
2318 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; 2403 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
2319 retval = FAILED; 2404 retval = FAILED;
2320 goto out; 2405 goto out;
2321 } 2406 }
2322 2407
2408 /* Let SR-IOV VF & PF sync up if there was a HB failure */
2409 if (instance->requestorId && !iotimeout) {
2410 msleep(MEGASAS_OCR_SETTLE_TIME_VF);
2411 /* Look for a late HB update after VF settle time */
2412 if (abs_state == MFI_STATE_OPERATIONAL &&
2413 (instance->hb_host_mem->HB.fwCounter !=
2414 instance->hb_host_mem->HB.driverCounter)) {
2415 instance->hb_host_mem->HB.driverCounter =
2416 instance->hb_host_mem->HB.fwCounter;
2417 printk(KERN_WARNING "megasas: SR-IOV:"
2418 "Late FW heartbeat update for "
2419 "scsi%d.\n",
2420 instance->host->host_no);
2421 } else {
2422 /* In VF mode, first poll for FW ready */
2423 for (i = 0;
2424 i < (MEGASAS_RESET_WAIT_TIME * 1000);
2425 i += 20) {
2426 status_reg =
2427 instance->instancet->
2428 read_fw_status_reg(
2429 instance->reg_set);
2430 abs_state = status_reg &
2431 MFI_STATE_MASK;
2432 if (abs_state == MFI_STATE_READY) {
2433 printk(KERN_WARNING "megasas"
2434 ": SR-IOV: FW was found"
2435 "to be in ready state "
2436 "for scsi%d.\n",
2437 instance->host->host_no);
2438 break;
2439 }
2440 msleep(20);
2441 }
2442 if (abs_state != MFI_STATE_READY) {
2443 printk(KERN_WARNING "megasas: SR-IOV: "
2444 "FW not in ready state after %d"
2445 " seconds for scsi%d, status_reg = "
2446 "0x%x.\n",
2447 MEGASAS_RESET_WAIT_TIME,
2448 instance->host->host_no,
2449 status_reg);
2450 megaraid_sas_kill_hba(instance);
2451 instance->skip_heartbeat_timer_del = 1;
2452 instance->adprecovery =
2453 MEGASAS_HW_CRITICAL_ERROR;
2454 retval = FAILED;
2455 goto out;
2456 }
2457 }
2458 }
2459
2323 /* Now try to reset the chip */ 2460 /* Now try to reset the chip */
2324 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { 2461 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
2325 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, 2462 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
@@ -2346,7 +2483,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2346 readl(&instance->reg_set->fusion_host_diag); 2483 readl(&instance->reg_set->fusion_host_diag);
2347 if (retry++ == 100) { 2484 if (retry++ == 100) {
2348 printk(KERN_WARNING "megaraid_sas: " 2485 printk(KERN_WARNING "megaraid_sas: "
2349 "Host diag unlock failed!\n"); 2486 "Host diag unlock failed! "
2487 "for scsi%d\n",
2488 instance->host->host_no);
2350 break; 2489 break;
2351 } 2490 }
2352 } 2491 }
@@ -2368,7 +2507,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2368 if (retry++ == 1000) { 2507 if (retry++ == 1000) {
2369 printk(KERN_WARNING "megaraid_sas: " 2508 printk(KERN_WARNING "megaraid_sas: "
2370 "Diag reset adapter never " 2509 "Diag reset adapter never "
2371 "cleared!\n"); 2510 "cleared for scsi%d!\n",
2511 instance->host->host_no);
2372 break; 2512 break;
2373 } 2513 }
2374 } 2514 }
@@ -2390,29 +2530,29 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2390 if (abs_state <= MFI_STATE_FW_INIT) { 2530 if (abs_state <= MFI_STATE_FW_INIT) {
2391 printk(KERN_WARNING "megaraid_sas: firmware " 2531 printk(KERN_WARNING "megaraid_sas: firmware "
2392 "state < MFI_STATE_FW_INIT, state = " 2532 "state < MFI_STATE_FW_INIT, state = "
2393 "0x%x\n", abs_state); 2533 "0x%x for scsi%d\n", abs_state,
2534 instance->host->host_no);
2394 continue; 2535 continue;
2395 } 2536 }
2396 2537
2397 /* Wait for FW to become ready */ 2538 /* Wait for FW to become ready */
2398 if (megasas_transition_to_ready(instance, 1)) { 2539 if (megasas_transition_to_ready(instance, 1)) {
2399 printk(KERN_WARNING "megaraid_sas: Failed to " 2540 printk(KERN_WARNING "megaraid_sas: Failed to "
2400 "transition controller to ready.\n"); 2541 "transition controller to ready "
2542 "for scsi%d.\n",
2543 instance->host->host_no);
2401 continue; 2544 continue;
2402 } 2545 }
2403 2546
2404 megasas_reset_reply_desc(instance); 2547 megasas_reset_reply_desc(instance);
2405 if (megasas_ioc_init_fusion(instance)) { 2548 if (megasas_ioc_init_fusion(instance)) {
2406 printk(KERN_WARNING "megaraid_sas: " 2549 printk(KERN_WARNING "megaraid_sas: "
2407 "megasas_ioc_init_fusion() failed!\n"); 2550 "megasas_ioc_init_fusion() failed!"
2551 " for scsi%d\n",
2552 instance->host->host_no);
2408 continue; 2553 continue;
2409 } 2554 }
2410 2555
2411 clear_bit(MEGASAS_FUSION_IN_RESET,
2412 &instance->reset_flags);
2413 instance->instancet->enable_intr(instance);
2414 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2415
2416 /* Re-fire management commands */ 2556 /* Re-fire management commands */
2417 for (j = 0 ; j < instance->max_fw_cmds; j++) { 2557 for (j = 0 ; j < instance->max_fw_cmds; j++) {
2418 cmd_fusion = fusion->cmd_list[j]; 2558 cmd_fusion = fusion->cmd_list[j];
@@ -2422,7 +2562,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2422 instance-> 2562 instance->
2423 cmd_list[cmd_fusion->sync_cmd_idx]; 2563 cmd_list[cmd_fusion->sync_cmd_idx];
2424 if (cmd_mfi->frame->dcmd.opcode == 2564 if (cmd_mfi->frame->dcmd.opcode ==
2425 MR_DCMD_LD_MAP_GET_INFO) { 2565 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
2426 megasas_return_cmd(instance, 2566 megasas_return_cmd(instance,
2427 cmd_mfi); 2567 cmd_mfi);
2428 megasas_return_cmd_fusion( 2568 megasas_return_cmd_fusion(
@@ -2433,11 +2573,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2433 instance, 2573 instance,
2434 cmd_mfi->context.smid 2574 cmd_mfi->context.smid
2435 -1); 2575 -1);
2436 if (!req_desc) 2576 if (!req_desc) {
2437 printk(KERN_WARNING 2577 printk(KERN_WARNING
2438 "req_desc NULL" 2578 "req_desc NULL"
2439 "\n"); 2579 " for scsi%d\n",
2440 else { 2580 instance->host->host_no);
2581 /* Return leaked MPT
2582 frame */
2583 megasas_return_cmd_fusion(instance, cmd_fusion);
2584 } else {
2441 instance->instancet-> 2585 instance->instancet->
2442 fire_cmd(instance, 2586 fire_cmd(instance,
2443 req_desc-> 2587 req_desc->
@@ -2451,6 +2595,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2451 } 2595 }
2452 } 2596 }
2453 2597
2598 clear_bit(MEGASAS_FUSION_IN_RESET,
2599 &instance->reset_flags);
2600 instance->instancet->enable_intr(instance);
2601 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2602
2454 /* Reset load balance info */ 2603 /* Reset load balance info */
2455 memset(fusion->load_balance_info, 0, 2604 memset(fusion->load_balance_info, 0,
2456 sizeof(struct LD_LOAD_BALANCE_INFO) 2605 sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2459,18 +2608,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2459 if (!megasas_get_map_info(instance)) 2608 if (!megasas_get_map_info(instance))
2460 megasas_sync_map_info(instance); 2609 megasas_sync_map_info(instance);
2461 2610
2611 /* Restart SR-IOV heartbeat */
2612 if (instance->requestorId) {
2613 if (!megasas_sriov_start_heartbeat(instance, 0))
2614 megasas_start_timer(instance,
2615 &instance->sriov_heartbeat_timer,
2616 megasas_sriov_heartbeat_handler,
2617 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2618 else
2619 instance->skip_heartbeat_timer_del = 1;
2620 }
2621
2462 /* Adapter reset completed successfully */ 2622 /* Adapter reset completed successfully */
2463 printk(KERN_WARNING "megaraid_sas: Reset " 2623 printk(KERN_WARNING "megaraid_sas: Reset "
2464 "successful.\n"); 2624 "successful for scsi%d.\n",
2625 instance->host->host_no);
2465 retval = SUCCESS; 2626 retval = SUCCESS;
2466 goto out; 2627 goto out;
2467 } 2628 }
2468 /* Reset failed, kill the adapter */ 2629 /* Reset failed, kill the adapter */
2469 printk(KERN_WARNING "megaraid_sas: Reset failed, killing " 2630 printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
2470 "adapter.\n"); 2631 "adapter scsi%d.\n", instance->host->host_no);
2471 megaraid_sas_kill_hba(instance); 2632 megaraid_sas_kill_hba(instance);
2633 instance->skip_heartbeat_timer_del = 1;
2634 instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
2472 retval = FAILED; 2635 retval = FAILED;
2473 } else { 2636 } else {
2637 /* For VF: Restart HB timer if we didn't OCR */
2638 if (instance->requestorId) {
2639 megasas_start_timer(instance,
2640 &instance->sriov_heartbeat_timer,
2641 megasas_sriov_heartbeat_handler,
2642 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2643 }
2474 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 2644 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2475 instance->instancet->enable_intr(instance); 2645 instance->instancet->enable_intr(instance);
2476 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2646 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2487,7 +2657,7 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
2487 struct megasas_instance *instance = 2657 struct megasas_instance *instance =
2488 container_of(work, struct megasas_instance, work_init); 2658 container_of(work, struct megasas_instance, work_init);
2489 2659
2490 megasas_reset_fusion(instance->host); 2660 megasas_reset_fusion(instance->host, 0);
2491 } 2661 }
2492 2662
2493 struct megasas_instance_template megasas_instance_template_fusion = { 2663 struct megasas_instance_template megasas_instance_template_fusion = {
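
Much of the fusion rework revolves around the SR-IOV heartbeat: while waiting for outstanding commands, the driver mirrors the firmware counter into driverCounter whenever it advances and converts the OCR into a full reset once the counter stalls for a whole heartbeat interval. The detector on its own, as a runnable sketch (the counter names follow the diff; the 4-miss threshold stands in for the interval/HZ computation):

    #include <stdint.h>
    #include <stdio.h>

    struct hb_mem { uint32_t fwCounter, driverCounter; };

    /* returns 1 once the firmware heartbeat has stalled for max_misses polls */
    static int hb_poll(struct hb_mem *hb, int *misses, int max_misses)
    {
        if (hb->fwCounter != hb->driverCounter) {
            hb->driverCounter = hb->fwCounter; /* acknowledge progress */
            *misses = 0;
            return 0;
        }
        return ++(*misses) == max_misses;
    }

    int main(void)
    {
        struct hb_mem hb = { .fwCounter = 1, .driverCounter = 0 };
        int misses = 0, i;

        for (i = 0; i < 10; i++)
            if (hb_poll(&hb, &misses, 4)) {
                printf("heartbeat lost after poll %d\n", i);
                return 1;
            }
        printf("firmware alive\n");
        return 0;
    }
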
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 35a51397b364..e76af5459a09 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -485,6 +485,9 @@ struct MPI2_IOC_INIT_REQUEST {
485 #define MAX_PHYSICAL_DEVICES 256 485 #define MAX_PHYSICAL_DEVICES 256
486 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) 486 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
487 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 487 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
488 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc */
489 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
490 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
488 491
489 struct MR_DEV_HANDLE_INFO { 492 struct MR_DEV_HANDLE_INFO {
490 u16 curDevHdl; 493 u16 curDevHdl;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 62f1a6031765..0d78a4d5576c 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
453 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); 453 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
454 454
455 if (instance->irq != SCSI_IRQ_NONE) 455 if (instance->irq != SCSI_IRQ_NONE)
456 if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, 456 if (request_irq(instance->irq, pas16_intr, 0,
457 "pas16", instance)) { 457 "pas16", instance)) {
458 printk("scsi%d : IRQ%d not free, interrupts disabled\n", 458 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
459 instance->host_no, instance->irq); 459 instance->host_no, instance->irq);
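
The pas16 hunk above (and the pm8001 one further down) drops IRQF_DISABLED, which had long been a no-op: genirq runs every handler with local interrupts off regardless, so passing 0 requests identical behavior. A minimal kernel-style sketch with a hypothetical handler name:

    static irqreturn_t demo_intr(int irq, void *dev_id)
    {
        /* runs with local interrupts disabled whatever the flags say */
        return IRQ_HANDLED;
    }

    /* flags 0 where IRQF_DISABLED used to be passed */
    if (request_irq(irq, demo_intr, 0, "demo", instance))
        printk(KERN_ERR "demo: IRQ%d not free\n", irq);
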
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a04b4ff8c7f6..28b4e8139153 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -323,24 +323,17 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
323 int offset; 323 int offset;
324 char *str = buf; 324 char *str = buf;
325 int start = 0; 325 int start = 0;
326 #define IB_MEMMAP(c) \ 326 #define IB_MEMMAP(c) \
327 (*(u32 *)((u8 *)pm8001_ha-> \ 327 (*(u32 *)((u8 *)pm8001_ha-> \
328 memoryMap.region[IB].virt_ptr + \ 328 memoryMap.region[IB].virt_ptr + \
329 pm8001_ha->evtlog_ib_offset + (c))) 329 pm8001_ha->evtlog_ib_offset + (c)))
330 330
331 for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { 331 for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
332 if (pm8001_ha->chip_id != chip_8001) 332 str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
333 str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
334 else
335 str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
336 start = start + 4; 333 start = start + 4;
337 } 334 }
338 pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; 335 pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
339 if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 336 if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
340 && (pm8001_ha->chip_id != chip_8001))
341 pm8001_ha->evtlog_ib_offset = 0;
342 if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
343 && (pm8001_ha->chip_id == chip_8001))
344 pm8001_ha->evtlog_ib_offset = 0; 337 pm8001_ha->evtlog_ib_offset = 0;
345 338
346 return str - buf; 339 return str - buf;
@@ -363,24 +356,17 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
363 int offset; 356 int offset;
364 char *str = buf; 357 char *str = buf;
365 int start = 0; 358 int start = 0;
366 #define OB_MEMMAP(c) \ 359 #define OB_MEMMAP(c) \
367 (*(u32 *)((u8 *)pm8001_ha-> \ 360 (*(u32 *)((u8 *)pm8001_ha-> \
368 memoryMap.region[OB].virt_ptr + \ 361 memoryMap.region[OB].virt_ptr + \
369 pm8001_ha->evtlog_ob_offset + (c))) 362 pm8001_ha->evtlog_ob_offset + (c)))
370 363
371 for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { 364 for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
372 if (pm8001_ha->chip_id != chip_8001) 365 str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
373 str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
374 else
375 str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
376 start = start + 4; 366 start = start + 4;
377 } 367 }
378 pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; 368 pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
379 if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 369 if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
380 && (pm8001_ha->chip_id != chip_8001))
381 pm8001_ha->evtlog_ob_offset = 0;
382 if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
383 && (pm8001_ha->chip_id == chip_8001))
384 pm8001_ha->evtlog_ob_offset = 0; 370 pm8001_ha->evtlog_ob_offset = 0;
385 371
386 return str - buf; 372 return str - buf;
@@ -466,7 +452,7 @@ static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
466 static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, 452 static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
467 struct device_attribute *attr, char *buf) 453 struct device_attribute *attr, char *buf)
468 { 454 {
469 u32 count; 455 ssize_t count;
470 456
471 count = pm80xx_get_fatal_dump(cdev, attr, buf); 457 count = pm80xx_get_fatal_dump(cdev, attr, buf);
472 return count; 458 return count;
@@ -484,7 +470,7 @@ static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
484 static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, 470 static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
485 struct device_attribute *attr, char *buf) 471 struct device_attribute *attr, char *buf)
486 { 472 {
487 u32 count; 473 ssize_t count;
488 474
489 count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); 475 count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
490 return count; 476 return count;
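
Widening the sysfs show counters from u32 to ssize_t matters because the dump helpers below now return negative errno values; stored in a u32, -EINVAL turns into a huge positive byte count that the sysfs core would happily report as a valid length. A quick demonstration of the truncation:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        ssize_t ret = -EINVAL;  /* error path of a dump helper */
        uint32_t old = ret;     /* the former u32 storage drops the sign */

        printf("as u32: %u (looks like a byte count)\n", old);
        printf("as ssize_t: %zd (propagates as an error)\n", ret);
        return 0;
    }
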
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 0a1296a87d66..a97be015e52e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -644,7 +644,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
644 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); 644 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
645 /* 8081 controllers need BAR shift to access MPI space 645 /* 8081 controllers need BAR shift to access MPI space
646 * as this is shared with BIOS data */ 646 * as this is shared with BIOS data */
647 if (deviceid == 0x8081) { 647 if (deviceid == 0x8081 || deviceid == 0x0042) {
648 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { 648 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
649 PM8001_FAIL_DBG(pm8001_ha, 649 PM8001_FAIL_DBG(pm8001_ha,
650 pm8001_printk("Shift Bar4 to 0x%x failed\n", 650 pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -673,7 +673,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
673 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) 673 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
674 update_outbnd_queue_table(pm8001_ha, i); 674 update_outbnd_queue_table(pm8001_ha, i);
675 /* 8081 controllers do not require these operations */ 675 /* 8081 controllers do not require these operations */
676 if (deviceid != 0x8081) { 676 if (deviceid != 0x8081 && deviceid != 0x0042) {
677 mpi_set_phys_g3_with_ssc(pm8001_ha, 0); 677 mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
678 /* 7->130ms, 34->500ms, 119->1.5s */ 678 /* 7->130ms, 34->500ms, 119->1.5s */
679 mpi_set_open_retry_interval_reg(pm8001_ha, 119); 679 mpi_set_open_retry_interval_reg(pm8001_ha, 119);
@@ -701,7 +701,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
701 u32 gst_len_mpistate; 701 u32 gst_len_mpistate;
702 u16 deviceid; 702 u16 deviceid;
703 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); 703 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
704 if (deviceid == 0x8081) { 704 if (deviceid == 0x8081 || deviceid == 0x0042) {
705 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { 705 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
706 PM8001_FAIL_DBG(pm8001_ha, 706 PM8001_FAIL_DBG(pm8001_ha,
707 pm8001_printk("Shift Bar4 to 0x%x failed\n", 707 pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -2502,11 +2502,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2502 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2502 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2503 ts->resp = SAS_TASK_UNDELIVERED; 2503 ts->resp = SAS_TASK_UNDELIVERED;
2504 ts->stat = SAS_QUEUE_FULL; 2504 ts->stat = SAS_QUEUE_FULL;
2505 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2505 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2506 mb();/*in order to force CPU ordering*/
2507 spin_unlock_irq(&pm8001_ha->lock);
2508 t->task_done(t);
2509 spin_lock_irq(&pm8001_ha->lock);
2510 return; 2506 return;
2511 } 2507 }
2512 break; 2508 break;
@@ -2522,11 +2518,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2522 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2518 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2523 ts->resp = SAS_TASK_UNDELIVERED; 2519 ts->resp = SAS_TASK_UNDELIVERED;
2524 ts->stat = SAS_QUEUE_FULL; 2520 ts->stat = SAS_QUEUE_FULL;
2525 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2521 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2526 mb();/*ditto*/
2527 spin_unlock_irq(&pm8001_ha->lock);
2528 t->task_done(t);
2529 spin_lock_irq(&pm8001_ha->lock);
2530 return; 2522 return;
2531 } 2523 }
2532 break; 2524 break;
@@ -2550,11 +2542,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2550 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); 2542 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
2551 ts->resp = SAS_TASK_UNDELIVERED; 2543 ts->resp = SAS_TASK_UNDELIVERED;
2552 ts->stat = SAS_QUEUE_FULL; 2544 ts->stat = SAS_QUEUE_FULL;
2553 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2545 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2554 mb();/* ditto*/
2555 spin_unlock_irq(&pm8001_ha->lock);
2556 t->task_done(t);
2557 spin_lock_irq(&pm8001_ha->lock);
2558 return; 2546 return;
2559 } 2547 }
2560 break; 2548 break;
@@ -2617,11 +2605,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2617 IO_DS_NON_OPERATIONAL); 2605 IO_DS_NON_OPERATIONAL);
2618 ts->resp = SAS_TASK_UNDELIVERED; 2606 ts->resp = SAS_TASK_UNDELIVERED;
2619 ts->stat = SAS_QUEUE_FULL; 2607 ts->stat = SAS_QUEUE_FULL;
2620 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2608 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2621 mb();/*ditto*/
2622 spin_unlock_irq(&pm8001_ha->lock);
2623 t->task_done(t);
2624 spin_lock_irq(&pm8001_ha->lock);
2625 return; 2609 return;
2626 } 2610 }
2627 break; 2611 break;
@@ -2641,11 +2625,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2641 IO_DS_IN_ERROR); 2625 IO_DS_IN_ERROR);
2642 ts->resp = SAS_TASK_UNDELIVERED; 2626 ts->resp = SAS_TASK_UNDELIVERED;
2643 ts->stat = SAS_QUEUE_FULL; 2627 ts->stat = SAS_QUEUE_FULL;
2644 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2628 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2645 mb();/*ditto*/
2646 spin_unlock_irq(&pm8001_ha->lock);
2647 t->task_done(t);
2648 spin_lock_irq(&pm8001_ha->lock);
2649 return; 2629 return;
2650 } 2630 }
2651 break; 2631 break;
@@ -2674,20 +2654,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2674 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2654 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2675 t, status, ts->resp, ts->stat)); 2655 t, status, ts->resp, ts->stat));
2676 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2656 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2677 } else if (t->uldd_task) { 2657 } else {
2678 spin_unlock_irqrestore(&t->task_state_lock, flags);
2679 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2680 mb();/* ditto */
2681 spin_unlock_irq(&pm8001_ha->lock);
2682 t->task_done(t);
2683 spin_lock_irq(&pm8001_ha->lock);
2684 } else if (!t->uldd_task) {
2685 spin_unlock_irqrestore(&t->task_state_lock, flags); 2658 spin_unlock_irqrestore(&t->task_state_lock, flags);
2686 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2659 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2687 mb();/*ditto*/
2688 spin_unlock_irq(&pm8001_ha->lock);
2689 t->task_done(t);
2690 spin_lock_irq(&pm8001_ha->lock);
2691 } 2660 }
2692} 2661}
2693 2662
@@ -2796,11 +2765,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2796 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2765 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2797 ts->resp = SAS_TASK_COMPLETE; 2766 ts->resp = SAS_TASK_COMPLETE;
2798 ts->stat = SAS_QUEUE_FULL; 2767 ts->stat = SAS_QUEUE_FULL;
2799 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2768 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2800 mb();/*ditto*/
2801 spin_unlock_irq(&pm8001_ha->lock);
2802 t->task_done(t);
2803 spin_lock_irq(&pm8001_ha->lock);
2804 return; 2769 return;
2805 } 2770 }
2806 break; 2771 break;
@@ -2909,20 +2874,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2909 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2874 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2910 t, event, ts->resp, ts->stat)); 2875 t, event, ts->resp, ts->stat));
2911 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2876 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2912 } else if (t->uldd_task) { 2877 } else {
2913 spin_unlock_irqrestore(&t->task_state_lock, flags);
2914 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2915 mb();/* ditto */
2916 spin_unlock_irq(&pm8001_ha->lock);
2917 t->task_done(t);
2918 spin_lock_irq(&pm8001_ha->lock);
2919 } else if (!t->uldd_task) {
2920 spin_unlock_irqrestore(&t->task_state_lock, flags); 2878 spin_unlock_irqrestore(&t->task_state_lock, flags);
2921 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2879 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2922 mb();/*ditto*/
2923 spin_unlock_irq(&pm8001_ha->lock);
2924 t->task_done(t);
2925 spin_lock_irq(&pm8001_ha->lock);
2926 } 2880 }
2927} 2881}
2928 2882
@@ -4467,23 +4421,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4467 " stat 0x%x but aborted by upper layer " 4421 " stat 0x%x but aborted by upper layer "
4468 "\n", task, ts->resp, ts->stat)); 4422 "\n", task, ts->resp, ts->stat));
4469 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); 4423 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4470 } else if (task->uldd_task) { 4424 } else {
4471 spin_unlock_irqrestore(&task->task_state_lock,
4472 flags);
4473 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4474 mb();/* ditto */
4475 spin_unlock_irq(&pm8001_ha->lock);
4476 task->task_done(task);
4477 spin_lock_irq(&pm8001_ha->lock);
4478 return 0;
4479 } else if (!task->uldd_task) {
4480 spin_unlock_irqrestore(&task->task_state_lock, 4425 spin_unlock_irqrestore(&task->task_state_lock,
4481 flags); 4426 flags);
4482 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); 4427 pm8001_ccb_task_free_done(pm8001_ha, task,
4483 mb();/*ditto*/ 4428 ccb, tag);
4484 spin_unlock_irq(&pm8001_ha->lock);
4485 task->task_done(task);
4486 spin_lock_irq(&pm8001_ha->lock);
4487 return 0; 4429 return 0;
4488 } 4430 }
4489 } 4431 }
@@ -5020,7 +4962,7 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
5020 /* check max is 1 Mbytes */ 4962 /* check max is 1 Mbytes */
5021 if ((length > 0x100000) || (gsm_dump_offset & 3) || 4963 if ((length > 0x100000) || (gsm_dump_offset & 3) ||
5022 ((gsm_dump_offset + length) > 0x1000000)) 4964 ((gsm_dump_offset + length) > 0x1000000))
5023 return 1; 4965 return -EINVAL;
5024 4966
5025 if (pm8001_ha->chip_id == chip_8001) 4967 if (pm8001_ha->chip_id == chip_8001)
5026 bar = 2; 4968 bar = 2;
@@ -5048,12 +4990,12 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
5048 gsm_base = GSM_BASE; 4990 gsm_base = GSM_BASE;
5049 if (-1 == pm8001_bar4_shift(pm8001_ha, 4991 if (-1 == pm8001_bar4_shift(pm8001_ha,
5050 (gsm_base + shift_value))) 4992 (gsm_base + shift_value)))
5051 return 1; 4993 return -EIO;
5052 } else { 4994 } else {
5053 gsm_base = 0; 4995 gsm_base = 0;
5054 if (-1 == pm80xx_bar4_shift(pm8001_ha, 4996 if (-1 == pm80xx_bar4_shift(pm8001_ha,
5055 (gsm_base + shift_value))) 4997 (gsm_base + shift_value)))
5056 return 1; 4998 return -EIO;
5057 } 4999 }
5058 gsm_dump_offset = (gsm_dump_offset + offset) & 5000 gsm_dump_offset = (gsm_dump_offset + offset) &
5059 0xFFFF0000; 5001 0xFFFF0000;
@@ -5072,13 +5014,8 @@ pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
5072 direct_data += sprintf(direct_data, "%08x ", value); 5014 direct_data += sprintf(direct_data, "%08x ", value);
5073 } 5015 }
5074 /* Shift back to BAR4 original address */ 5016 /* Shift back to BAR4 original address */
5075 if (pm8001_ha->chip_id == chip_8001) { 5017 if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
5076 if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) 5018 return -EIO;
5077 return 1;
5078 } else {
5079 if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
5080 return 1;
5081 }
5082 pm8001_ha->fatal_forensic_shift_offset += 1024; 5019 pm8001_ha->fatal_forensic_shift_offset += 1024;
5083 5020
5084 if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) 5021 if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 73a120d81b4d..c4f31b21feb8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -625,7 +625,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
625 pm8001_ha->nvmd_completion = &completion; 625 pm8001_ha->nvmd_completion = &completion;
626 626
627 if (pm8001_ha->chip_id == chip_8001) { 627 if (pm8001_ha->chip_id == chip_8001) {
628 if (deviceid == 0x8081) { 628 if (deviceid == 0x8081 || deviceid == 0x0042) {
629 payload.minor_function = 4; 629 payload.minor_function = 4;
630 payload.length = 4096; 630 payload.length = 4096;
631 } else { 631 } else {
@@ -646,6 +646,9 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
646 if (deviceid == 0x8081) 646 if (deviceid == 0x8081)
647 pm8001_ha->sas_addr[j] = 647 pm8001_ha->sas_addr[j] =
648 payload.func_specific[0x704 + i]; 648 payload.func_specific[0x704 + i];
649 else if (deviceid == 0x0042)
650 pm8001_ha->sas_addr[j] =
651 payload.func_specific[0x010 + i];
649 } else 652 } else
650 pm8001_ha->sas_addr[j] = 653 pm8001_ha->sas_addr[j] =
651 payload.func_specific[0x804 + i]; 654 payload.func_specific[0x804 + i];
@@ -713,11 +716,9 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
713 /* SPCv controllers support 64 msi-x */ 716 /* SPCv controllers support 64 msi-x */
714 if (pm8001_ha->chip_id == chip_8001) { 717 if (pm8001_ha->chip_id == chip_8001) {
715 number_of_intr = 1; 718 number_of_intr = 1;
716 flag |= IRQF_DISABLED;
717 } else { 719 } else {
718 number_of_intr = PM8001_MAX_MSIX_VEC; 720 number_of_intr = PM8001_MAX_MSIX_VEC;
719 flag &= ~IRQF_SHARED; 721 flag &= ~IRQF_SHARED;
720 flag |= IRQF_DISABLED;
721 } 722 }
722 723
723 max_entry = sizeof(pm8001_ha->msix_entries) / 724 max_entry = sizeof(pm8001_ha->msix_entries) /
@@ -1072,10 +1073,7 @@ err_out_enable:
1072 */ 1073 */
1073 static struct pci_device_id pm8001_pci_table[] = { 1074 static struct pci_device_id pm8001_pci_table[] = {
1074 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, 1075 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
1075 { 1076 { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
1076 PCI_DEVICE(0x117c, 0x0042),
1077 .driver_data = chip_8001
1078 },
1079 /* Support for SPC/SPCv/SPCve controllers */ 1077 /* Support for SPC/SPCv/SPCve controllers */
1080 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, 1078 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
1081 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, 1079 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
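
The table cleanup above leans on PCI_VENDOR_ID_ATTO (0x117c) already being defined, so the open-coded PCI_DEVICE() entry can become a one-liner. A sketch of what the PCI_VDEVICE() form is shorthand for, assuming the usual macro shape:

    static const struct pci_device_id demo_pci_table[] = {
        /* roughly: .vendor = PCI_VENDOR_ID_ATTO, .device = 0x0042,
         * .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
         * with chip_8001 landing in .driver_data */
        { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
        { } /* terminator */
    };
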
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f50ac44b950e..8a44bc92bc78 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -434,6 +434,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
434 ccb->n_elem = n_elem; 434 ccb->n_elem = n_elem;
435 ccb->ccb_tag = tag; 435 ccb->ccb_tag = tag;
436 ccb->task = t; 436 ccb->task = t;
437 ccb->device = pm8001_dev;
437 switch (t->task_proto) { 438 switch (t->task_proto) {
438 case SAS_PROTOCOL_SMP: 439 case SAS_PROTOCOL_SMP:
439 rc = pm8001_task_prep_smp(pm8001_ha, ccb); 440 rc = pm8001_task_prep_smp(pm8001_ha, ccb);
@@ -865,13 +866,11 @@ ex_err:
865static void pm8001_dev_gone_notify(struct domain_device *dev) 866static void pm8001_dev_gone_notify(struct domain_device *dev)
866{ 867{
867 unsigned long flags = 0; 868 unsigned long flags = 0;
868 u32 tag;
869 struct pm8001_hba_info *pm8001_ha; 869 struct pm8001_hba_info *pm8001_ha;
870 struct pm8001_device *pm8001_dev = dev->lldd_dev; 870 struct pm8001_device *pm8001_dev = dev->lldd_dev;
871 871
872 pm8001_ha = pm8001_find_ha_by_dev(dev); 872 pm8001_ha = pm8001_find_ha_by_dev(dev);
873 spin_lock_irqsave(&pm8001_ha->lock, flags); 873 spin_lock_irqsave(&pm8001_ha->lock, flags);
874 pm8001_tag_alloc(pm8001_ha, &tag);
875 if (pm8001_dev) { 874 if (pm8001_dev) {
876 u32 device_id = pm8001_dev->device_id; 875 u32 device_id = pm8001_dev->device_id;
877 876
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6c5fd5ee22d3..1ee06f21803b 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -708,5 +708,17 @@ ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
708/* ctl shared API */ 708/* ctl shared API */
709extern struct device_attribute *pm8001_host_attrs[]; 709extern struct device_attribute *pm8001_host_attrs[];
710 710
711static inline void
712pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
713 struct sas_task *task, struct pm8001_ccb_info *ccb,
714 u32 ccb_idx)
715{
716 pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
717 smp_mb(); /* in order to force CPU ordering */
718 spin_unlock(&pm8001_ha->lock);
719 task->task_done(task);
720 spin_lock(&pm8001_ha->lock);
721}
722
711#endif 723#endif
712 724
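
The new pm8001_ccb_task_free_done() centralizes an idiom repeated across the hwi completion paths: libsas's task_done() callback may re-enter the LLDD and take pm8001_ha->lock again, so the lock has to be dropped just for the duration of the callback. A minimal sketch of the shape, with hypothetical example_ names standing in for the driver's types:

    #include <linux/spinlock.h>

    struct example_ctx {
        spinlock_t lock;
        void (*done)(struct example_ctx *);  /* may re-take ->lock */
    };

    static void example_complete(struct example_ctx *ctx)
    {
        /* entered with ctx->lock held */
        smp_mb();               /* publish the frees before the callback */
        spin_unlock(&ctx->lock);
        ctx->done(ctx);         /* runs unlocked; re-entry is safe */
        spin_lock(&ctx->lock);  /* restore the caller's locking state */
    }
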
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c950dc5c9943..d70587f96184 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -91,7 +91,6 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
91 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 91 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
92 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 92 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
93 void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; 93 void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
94 u32 status = 1;
95 u32 accum_len, reg_val, index, *temp; 94 u32 accum_len, reg_val, index, *temp;
96 unsigned long start; 95 unsigned long start;
97 u8 *direct_data; 96 u8 *direct_data;
@@ -111,13 +110,10 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
111 direct_data = (u8 *)fatal_error_data; 110 direct_data = (u8 *)fatal_error_data;
112 pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; 111 pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
113 pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; 112 pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
114 pm8001_ha->forensic_info.data_buf.direct_offset = 0;
115 pm8001_ha->forensic_info.data_buf.read_len = 0; 113 pm8001_ha->forensic_info.data_buf.read_len = 0;
116 114
117 pm8001_ha->forensic_info.data_buf.direct_data = direct_data; 115 pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
118 }
119 116
120 if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
121 /* start to get data */ 117 /* start to get data */
122 /* Program the MEMBASE II Shifting Register with 0x00.*/ 118 /* Program the MEMBASE II Shifting Register with 0x00.*/
123 pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, 119 pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,6 +122,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
126 pm8001_ha->forensic_fatal_step = 0; 122 pm8001_ha->forensic_fatal_step = 0;
127 pm8001_ha->fatal_bar_loc = 0; 123 pm8001_ha->fatal_bar_loc = 0;
128 } 124 }
125
129 /* Read until accum_len is retrieved */ 126 /* Read until accum_len is retrieved */
130 accum_len = pm8001_mr32(fatal_table_address, 127 accum_len = pm8001_mr32(fatal_table_address,
131 MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); 128 MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
@@ -135,7 +132,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
135 PM8001_IO_DBG(pm8001_ha, 132 PM8001_IO_DBG(pm8001_ha,
136 pm8001_printk("Possible PCI issue 0x%x not expected\n", 133 pm8001_printk("Possible PCI issue 0x%x not expected\n",
137 accum_len)); 134 accum_len));
138 return status; 135 return -EIO;
139 } 136 }
140 if (accum_len == 0 || accum_len >= 0x100000) { 137 if (accum_len == 0 || accum_len >= 0x100000) {
141 pm8001_ha->forensic_info.data_buf.direct_data += 138 pm8001_ha->forensic_info.data_buf.direct_data +=
@@ -178,7 +175,6 @@ moreData:
178 pm8001_ha->forensic_fatal_step = 1; 175 pm8001_ha->forensic_fatal_step = 1;
179 pm8001_ha->fatal_forensic_shift_offset = 0; 176 pm8001_ha->fatal_forensic_shift_offset = 0;
180 pm8001_ha->forensic_last_offset = 0; 177 pm8001_ha->forensic_last_offset = 0;
181 status = 0;
182 return (char *)pm8001_ha-> 178 return (char *)pm8001_ha->
183 forensic_info.data_buf.direct_data - 179 forensic_info.data_buf.direct_data -
184 (char *)buf; 180 (char *)buf;
@@ -194,7 +190,6 @@ moreData:
194 forensic_info.data_buf.direct_data, 190 forensic_info.data_buf.direct_data,
195 "%08x ", *(temp + index)); 191 "%08x ", *(temp + index));
196 } 192 }
197 status = 0;
198 return (char *)pm8001_ha-> 193 return (char *)pm8001_ha->
199 forensic_info.data_buf.direct_data - 194 forensic_info.data_buf.direct_data -
200 (char *)buf; 195 (char *)buf;
@@ -214,7 +209,6 @@ moreData:
214 pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, 209 pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
215 pm8001_ha->fatal_forensic_shift_offset); 210 pm8001_ha->fatal_forensic_shift_offset);
216 pm8001_ha->fatal_bar_loc = 0; 211 pm8001_ha->fatal_bar_loc = 0;
217 status = 0;
218 return (char *)pm8001_ha->forensic_info.data_buf.direct_data - 212 return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
219 (char *)buf; 213 (char *)buf;
220 } 214 }
@@ -239,7 +233,7 @@ moreData:
239 PM8001_FAIL_DBG(pm8001_ha, 233 PM8001_FAIL_DBG(pm8001_ha,
240 pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" 234 pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
241 " = 0x%x\n", reg_val)); 235 " = 0x%x\n", reg_val));
242 return -1; 236 return -EIO;
243 } 237 }
244 238
245 /* Read the next 64K of the debug data. */ 239 /* Read the next 64K of the debug data. */
@@ -259,7 +253,6 @@ moreData:
259 pm8001_ha->forensic_info.data_buf.direct_len = 0; 253 pm8001_ha->forensic_info.data_buf.direct_len = 0;
260 pm8001_ha->forensic_info.data_buf.direct_offset = 0; 254 pm8001_ha->forensic_info.data_buf.direct_offset = 0;
261 pm8001_ha->forensic_info.data_buf.read_len = 0; 255 pm8001_ha->forensic_info.data_buf.read_len = 0;
262 status = 0;
263 } 256 }
264 } 257 }
265 258
@@ -2175,11 +2168,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2175 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2168 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2176 ts->resp = SAS_TASK_UNDELIVERED; 2169 ts->resp = SAS_TASK_UNDELIVERED;
2177 ts->stat = SAS_QUEUE_FULL; 2170 ts->stat = SAS_QUEUE_FULL;
2178 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2171 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2179 mb();/*in order to force CPU ordering*/
2180 spin_unlock_irq(&pm8001_ha->lock);
2181 t->task_done(t);
2182 spin_lock_irq(&pm8001_ha->lock);
2183 return; 2172 return;
2184 } 2173 }
2185 break; 2174 break;
@@ -2195,11 +2184,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2195 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2184 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2196 ts->resp = SAS_TASK_UNDELIVERED; 2185 ts->resp = SAS_TASK_UNDELIVERED;
2197 ts->stat = SAS_QUEUE_FULL; 2186 ts->stat = SAS_QUEUE_FULL;
2198 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2187 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2199 mb();/*ditto*/
2200 spin_unlock_irq(&pm8001_ha->lock);
2201 t->task_done(t);
2202 spin_lock_irq(&pm8001_ha->lock);
2203 return; 2188 return;
2204 } 2189 }
2205 break; 2190 break;
@@ -2221,11 +2206,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2221 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); 2206 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
2222 ts->resp = SAS_TASK_UNDELIVERED; 2207 ts->resp = SAS_TASK_UNDELIVERED;
2223 ts->stat = SAS_QUEUE_FULL; 2208 ts->stat = SAS_QUEUE_FULL;
2224 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2209 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2225 mb();/* ditto*/
2226 spin_unlock_irq(&pm8001_ha->lock);
2227 t->task_done(t);
2228 spin_lock_irq(&pm8001_ha->lock);
2229 return; 2210 return;
2230 } 2211 }
2231 break; 2212 break;
@@ -2288,11 +2269,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2288 IO_DS_NON_OPERATIONAL); 2269 IO_DS_NON_OPERATIONAL);
2289 ts->resp = SAS_TASK_UNDELIVERED; 2270 ts->resp = SAS_TASK_UNDELIVERED;
2290 ts->stat = SAS_QUEUE_FULL; 2271 ts->stat = SAS_QUEUE_FULL;
2291 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2272 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2292 mb();/*ditto*/
2293 spin_unlock_irq(&pm8001_ha->lock);
2294 t->task_done(t);
2295 spin_lock_irq(&pm8001_ha->lock);
2296 return; 2273 return;
2297 } 2274 }
2298 break; 2275 break;
@@ -2312,11 +2289,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2312 IO_DS_IN_ERROR); 2289 IO_DS_IN_ERROR);
2313 ts->resp = SAS_TASK_UNDELIVERED; 2290 ts->resp = SAS_TASK_UNDELIVERED;
2314 ts->stat = SAS_QUEUE_FULL; 2291 ts->stat = SAS_QUEUE_FULL;
2315 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2292 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2316 mb();/*ditto*/
2317 spin_unlock_irq(&pm8001_ha->lock);
2318 t->task_done(t);
2319 spin_lock_irq(&pm8001_ha->lock);
2320 return; 2293 return;
2321 } 2294 }
2322 break; 2295 break;
@@ -2345,20 +2318,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2345 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2318 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2346 t, status, ts->resp, ts->stat)); 2319 t, status, ts->resp, ts->stat));
2347 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2320 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2348 } else if (t->uldd_task) { 2321 } else {
2349 spin_unlock_irqrestore(&t->task_state_lock, flags);
2350 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2351 mb();/* ditto */
2352 spin_unlock_irq(&pm8001_ha->lock);
2353 t->task_done(t);
2354 spin_lock_irq(&pm8001_ha->lock);
2355 } else if (!t->uldd_task) {
2356 spin_unlock_irqrestore(&t->task_state_lock, flags); 2322 spin_unlock_irqrestore(&t->task_state_lock, flags);
2357 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2323 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2358 mb();/*ditto*/
2359 spin_unlock_irq(&pm8001_ha->lock);
2360 t->task_done(t);
2361 spin_lock_irq(&pm8001_ha->lock);
2362 } 2324 }
2363} 2325}
2364 2326
@@ -2470,11 +2432,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2470 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2432 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2471 ts->resp = SAS_TASK_COMPLETE; 2433 ts->resp = SAS_TASK_COMPLETE;
2472 ts->stat = SAS_QUEUE_FULL; 2434 ts->stat = SAS_QUEUE_FULL;
2473 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2435 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2474 mb();/*ditto*/
2475 spin_unlock_irq(&pm8001_ha->lock);
2476 t->task_done(t);
2477 spin_lock_irq(&pm8001_ha->lock);
2478 return; 2436 return;
2479 } 2437 }
2480 break; 2438 break;
@@ -2596,20 +2554,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2596 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2554 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2597 t, event, ts->resp, ts->stat)); 2555 t, event, ts->resp, ts->stat));
2598 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2556 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2599 } else if (t->uldd_task) { 2557 } else {
2600 spin_unlock_irqrestore(&t->task_state_lock, flags);
2601 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2602 mb();/* ditto */
2603 spin_unlock_irq(&pm8001_ha->lock);
2604 t->task_done(t);
2605 spin_lock_irq(&pm8001_ha->lock);
2606 } else if (!t->uldd_task) {
2607 spin_unlock_irqrestore(&t->task_state_lock, flags); 2558 spin_unlock_irqrestore(&t->task_state_lock, flags);
2608 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2559 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
2609 mb();/*ditto*/
2610 spin_unlock_irq(&pm8001_ha->lock);
2611 t->task_done(t);
2612 spin_lock_irq(&pm8001_ha->lock);
2613 } 2560 }
2614} 2561}
2615 2562
@@ -4304,23 +4251,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4304 "\n", task, ts->resp, ts->stat)); 4251 "\n", task, ts->resp, ts->stat));
4305 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); 4252 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4306 return 0; 4253 return 0;
4307 } else if (task->uldd_task) { 4254 } else {
4308 spin_unlock_irqrestore(&task->task_state_lock,
4309 flags);
4310 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4311 mb();/* ditto */
4312 spin_unlock_irq(&pm8001_ha->lock);
4313 task->task_done(task);
4314 spin_lock_irq(&pm8001_ha->lock);
4315 return 0;
4316 } else if (!task->uldd_task) {
4317 spin_unlock_irqrestore(&task->task_state_lock, 4255 spin_unlock_irqrestore(&task->task_state_lock,
4318 flags); 4256 flags);
4319 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); 4257 pm8001_ccb_task_free_done(pm8001_ha, task,
4320 mb();/*ditto*/ 4258 ccb, tag);
4321 spin_unlock_irq(&pm8001_ha->lock);
4322 task->task_done(task);
4323 spin_lock_irq(&pm8001_ha->lock);
4324 return 0; 4259 return 0;
4325 } 4260 }
4326 } 4261 }
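
Each call site replaced above collapses the same five-line unlock/complete/relock sequence into that one helper; schematically:

    /* before: open-coded at every completion path */
    pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
    mb();                               /* force CPU ordering */
    spin_unlock_irq(&pm8001_ha->lock);
    t->task_done(t);
    spin_lock_irq(&pm8001_ha->lock);

    /* after: one call, defined once in pm8001_sas.h */
    pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);

One visible behavior change: the helper uses plain spin_unlock()/spin_lock() where the old call sites used the _irq variants, so task_done() no longer unconditionally re-enables interrupts; that looks like the safer choice for paths that may be entered under spin_lock_irqsave(). The hunks also fold the redundant "else if (t->uldd_task)" / "else if (!t->uldd_task)" pair into a single else branch, since the two arms had become identical.
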
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c7812f..44def6bb4bb0 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o qla_mr.o qla_nx2.o qla_target.o 3 qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o 6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4a0d7c92181f..07befcf365b8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = {
147}; 147};
148 148
149static ssize_t 149static ssize_t
150qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
151 struct bin_attribute *bin_attr,
152 char *buf, loff_t off, size_t count)
153{
154 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
155 struct device, kobj)));
156 struct qla_hw_data *ha = vha->hw;
157
158 if (!ha->fw_dump_template || !ha->fw_dump_template_len)
159 return 0;
160
161 ql_dbg(ql_dbg_user, vha, 0x70e2,
162 "chunk <- off=%llx count=%zx\n", off, count);
163 return memory_read_from_buffer(buf, count, &off,
164 ha->fw_dump_template, ha->fw_dump_template_len);
165}
166
167static ssize_t
168qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
169 struct bin_attribute *bin_attr,
170 char *buf, loff_t off, size_t count)
171{
172 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
173 struct device, kobj)));
174 struct qla_hw_data *ha = vha->hw;
175 uint32_t size;
176
177 if (off == 0) {
178 if (ha->fw_dump)
179 vfree(ha->fw_dump);
180 if (ha->fw_dump_template)
181 vfree(ha->fw_dump_template);
182
183 ha->fw_dump = NULL;
184 ha->fw_dump_len = 0;
185 ha->fw_dump_template = NULL;
186 ha->fw_dump_template_len = 0;
187
188 size = qla27xx_fwdt_template_size(buf);
189 ql_dbg(ql_dbg_user, vha, 0x70d1,
190 "-> allocating fwdt (%x bytes)...\n", size);
191 ha->fw_dump_template = vmalloc(size);
192 if (!ha->fw_dump_template) {
193 ql_log(ql_log_warn, vha, 0x70d2,
194 "Failed allocate fwdt (%x bytes).\n", size);
195 return -ENOMEM;
196 }
197 ha->fw_dump_template_len = size;
198 }
199
200 if (off + count > ha->fw_dump_template_len) {
201 count = ha->fw_dump_template_len - off;
202 ql_dbg(ql_dbg_user, vha, 0x70d3,
203 "chunk -> truncating to %zx bytes.\n", count);
204 }
205
206 ql_dbg(ql_dbg_user, vha, 0x70d4,
207 "chunk -> off=%llx count=%zx\n", off, count);
208 memcpy(ha->fw_dump_template + off, buf, count);
209
210 if (off + count == ha->fw_dump_template_len) {
211 size = qla27xx_fwdt_calculate_dump_size(vha);
212 ql_dbg(ql_dbg_user, vha, 0x70d5,
213 "-> allocating fwdump (%x bytes)...\n", size);
214 ha->fw_dump = vmalloc(size);
215 if (!ha->fw_dump) {
216 ql_log(ql_log_warn, vha, 0x70d6,
217 "Failed allocate fwdump (%x bytes).\n", size);
218 return -ENOMEM;
219 }
220 ha->fw_dump_len = size;
221 }
222
223 return count;
224}
225static struct bin_attribute sysfs_fw_dump_template_attr = {
226 .attr = {
227 .name = "fw_dump_template",
228 .mode = S_IRUSR | S_IWUSR,
229 },
230 .size = 0,
231 .read = qla2x00_sysfs_read_fw_dump_template,
232 .write = qla2x00_sysfs_write_fw_dump_template,
233};
234
235static ssize_t
150qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, 236qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
151 struct bin_attribute *bin_attr, 237 struct bin_attribute *bin_attr,
152 char *buf, loff_t off, size_t count) 238 char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
241 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 327 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
242 struct device, kobj))); 328 struct device, kobj)));
243 struct qla_hw_data *ha = vha->hw; 329 struct qla_hw_data *ha = vha->hw;
330 ssize_t rval = 0;
244 331
245 if (ha->optrom_state != QLA_SREADING) 332 if (ha->optrom_state != QLA_SREADING)
246 return 0; 333 return 0;
247 334
248 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 335 mutex_lock(&ha->optrom_mutex);
249 ha->optrom_region_size); 336 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
337 ha->optrom_region_size);
338 mutex_unlock(&ha->optrom_mutex);
339
340 return rval;
250} 341}
251 342
252static ssize_t 343static ssize_t
@@ -265,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
265 if (off + count > ha->optrom_region_size) 356 if (off + count > ha->optrom_region_size)
266 count = ha->optrom_region_size - off; 357 count = ha->optrom_region_size - off;
267 358
359 mutex_lock(&ha->optrom_mutex);
268 memcpy(&ha->optrom_buffer[off], buf, count); 360 memcpy(&ha->optrom_buffer[off], buf, count);
361 mutex_unlock(&ha->optrom_mutex);
269 362
270 return count; 363 return count;
271} 364}
@@ -288,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
288 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 381 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
289 struct device, kobj))); 382 struct device, kobj)));
290 struct qla_hw_data *ha = vha->hw; 383 struct qla_hw_data *ha = vha->hw;
291
292 uint32_t start = 0; 384 uint32_t start = 0;
293 uint32_t size = ha->optrom_size; 385 uint32_t size = ha->optrom_size;
294 int val, valid; 386 int val, valid;
387 ssize_t rval = count;
295 388
296 if (off) 389 if (off)
297 return -EINVAL; 390 return -EINVAL;
@@ -304,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
304 if (start > ha->optrom_size) 397 if (start > ha->optrom_size)
305 return -EINVAL; 398 return -EINVAL;
306 399
400 mutex_lock(&ha->optrom_mutex);
307 switch (val) { 401 switch (val) {
308 case 0: 402 case 0:
309 if (ha->optrom_state != QLA_SREADING && 403 if (ha->optrom_state != QLA_SREADING &&
310 ha->optrom_state != QLA_SWRITING) 404 ha->optrom_state != QLA_SWRITING) {
311 return -EINVAL; 405 rval = -EINVAL;
312 406 goto out;
407 }
313 ha->optrom_state = QLA_SWAITING; 408 ha->optrom_state = QLA_SWAITING;
314 409
315 ql_dbg(ql_dbg_user, vha, 0x7061, 410 ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
320 ha->optrom_buffer = NULL; 415 ha->optrom_buffer = NULL;
321 break; 416 break;
322 case 1: 417 case 1:
323 if (ha->optrom_state != QLA_SWAITING) 418 if (ha->optrom_state != QLA_SWAITING) {
324 return -EINVAL; 419 rval = -EINVAL;
420 goto out;
421 }
325 422
326 ha->optrom_region_start = start; 423 ha->optrom_region_start = start;
327 ha->optrom_region_size = start + size > ha->optrom_size ? 424 ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
335 "(%x).\n", ha->optrom_region_size); 432 "(%x).\n", ha->optrom_region_size);
336 433
337 ha->optrom_state = QLA_SWAITING; 434 ha->optrom_state = QLA_SWAITING;
338 return -ENOMEM; 435 rval = -ENOMEM;
436 goto out;
339 } 437 }
340 438
341 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 439 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
342 ql_log(ql_log_warn, vha, 0x7063, 440 ql_log(ql_log_warn, vha, 0x7063,
343 "HBA not online, failing NVRAM update.\n"); 441 "HBA not online, failing NVRAM update.\n");
344 return -EAGAIN; 442 rval = -EAGAIN;
443 goto out;
345 } 444 }
346 445
347 ql_dbg(ql_dbg_user, vha, 0x7064, 446 ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
353 ha->optrom_region_start, ha->optrom_region_size); 452 ha->optrom_region_start, ha->optrom_region_size);
354 break; 453 break;
355 case 2: 454 case 2:
356 if (ha->optrom_state != QLA_SWAITING) 455 if (ha->optrom_state != QLA_SWAITING) {
357 return -EINVAL; 456 rval = -EINVAL;
457 goto out;
458 }
358 459
359 /* 460 /*
360 * We need to be more restrictive on which FLASH regions are 461 * We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
388 if (!valid) { 489 if (!valid) {
389 ql_log(ql_log_warn, vha, 0x7065, 490 ql_log(ql_log_warn, vha, 0x7065,
390 "Invalid start region 0x%x/0x%x.\n", start, size); 491 "Invalid start region 0x%x/0x%x.\n", start, size);
391 return -EINVAL; 492 rval = -EINVAL;
493 goto out;
392 } 494 }
393 495
394 ha->optrom_region_start = start; 496 ha->optrom_region_start = start;
@@ -403,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
403 "(%x)\n", ha->optrom_region_size); 505 "(%x)\n", ha->optrom_region_size);
404 506
405 ha->optrom_state = QLA_SWAITING; 507 ha->optrom_state = QLA_SWAITING;
406 return -ENOMEM; 508 rval = -ENOMEM;
509 goto out;
407 } 510 }
408 511
409 ql_dbg(ql_dbg_user, vha, 0x7067, 512 ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
413 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 516 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
414 break; 517 break;
415 case 3: 518 case 3:
416 if (ha->optrom_state != QLA_SWRITING) 519 if (ha->optrom_state != QLA_SWRITING) {
417 return -EINVAL; 520 rval = -EINVAL;
521 goto out;
522 }
418 523
419 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 524 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
420 ql_log(ql_log_warn, vha, 0x7068, 525 ql_log(ql_log_warn, vha, 0x7068,
421 "HBA not online, failing flash update.\n"); 526 "HBA not online, failing flash update.\n");
422 return -EAGAIN; 527 rval = -EAGAIN;
528 goto out;
423 } 529 }
424 530
425 ql_dbg(ql_dbg_user, vha, 0x7069, 531 ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
430 ha->optrom_region_start, ha->optrom_region_size); 536 ha->optrom_region_start, ha->optrom_region_size);
431 break; 537 break;
432 default: 538 default:
433 return -EINVAL; 539 rval = -EINVAL;
434 } 540 }
435 return count; 541
542out:
543 mutex_unlock(&ha->optrom_mutex);
544 return rval;
436} 545}
437 546
438static struct bin_attribute sysfs_optrom_ctl_attr = { 547static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@ static struct sysfs_entry {
822 int is4GBp_only; 931 int is4GBp_only;
823} bin_file_entries[] = { 932} bin_file_entries[] = {
824 { "fw_dump", &sysfs_fw_dump_attr, }, 933 { "fw_dump", &sysfs_fw_dump_attr, },
934 { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
825 { "nvram", &sysfs_nvram_attr, }, 935 { "nvram", &sysfs_nvram_attr, },
826 { "optrom", &sysfs_optrom_attr, }, 936 { "optrom", &sysfs_optrom_attr, },
827 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 937 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
847 continue; 957 continue;
848 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) 958 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
849 continue; 959 continue;
960 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
961 continue;
850 962
851 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 963 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
852 iter->attr); 964 iter->attr);
@@ -1187,7 +1299,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
1187 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1299 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1188 struct qla_hw_data *ha = vha->hw; 1300 struct qla_hw_data *ha = vha->hw;
1189 1301
1190 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1302 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1191 return scnprintf(buf, PAGE_SIZE, "\n"); 1303 return scnprintf(buf, PAGE_SIZE, "\n");
1192 1304
1193 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1305 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1391 return scnprintf(buf, PAGE_SIZE, "%d\n", size); 1503 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1392} 1504}
1393 1505
1506static ssize_t
1507qla2x00_allow_cna_fw_dump_show(struct device *dev,
1508 struct device_attribute *attr, char *buf)
1509{
1510 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1511
1512 if (!IS_P3P_TYPE(vha->hw))
1513 return scnprintf(buf, PAGE_SIZE, "\n");
1514 else
1515 return scnprintf(buf, PAGE_SIZE, "%s\n",
1516 vha->hw->allow_cna_fw_dump ? "true" : "false");
1517}
1518
1519static ssize_t
1520qla2x00_allow_cna_fw_dump_store(struct device *dev,
1521 struct device_attribute *attr, const char *buf, size_t count)
1522{
1523 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1524 int val = 0;
1525
1526 if (!IS_P3P_TYPE(vha->hw))
1527 return -EINVAL;
1528
1529 if (sscanf(buf, "%d", &val) != 1)
1530 return -EINVAL;
1531
1532 vha->hw->allow_cna_fw_dump = val != 0;
1533
1534 return strlen(buf);
1535}
1536
1394static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1537static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1395static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1538static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1396static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1539static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1432static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); 1575static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
1433static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); 1576static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1434static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); 1577static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1578static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
1579 qla2x00_allow_cna_fw_dump_show,
1580 qla2x00_allow_cna_fw_dump_store);
1435 1581
1436struct device_attribute *qla2x00_host_attrs[] = { 1582struct device_attribute *qla2x00_host_attrs[] = {
1437 &dev_attr_driver_version, 1583 &dev_attr_driver_version,
@@ -1464,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1464 &dev_attr_diag_requests, 1610 &dev_attr_diag_requests,
1465 &dev_attr_diag_megabytes, 1611 &dev_attr_diag_megabytes,
1466 &dev_attr_fw_dump_size, 1612 &dev_attr_fw_dump_size,
1613 &dev_attr_allow_cna_fw_dump,
1467 NULL, 1614 NULL,
1468}; 1615};
1469 1616
@@ -1509,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
1509 case PORT_SPEED_16GB: 1656 case PORT_SPEED_16GB:
1510 speed = FC_PORTSPEED_16GBIT; 1657 speed = FC_PORTSPEED_16GBIT;
1511 break; 1658 break;
1659 case PORT_SPEED_32GB:
1660 speed = FC_PORTSPEED_32GBIT;
1661 break;
1512 } 1662 }
1513 fc_host_speed(shost) = speed; 1663 fc_host_speed(shost) = speed;
1514} 1664}
@@ -2160,6 +2310,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
2160 else if (IS_QLAFX00(ha)) 2310 else if (IS_QLAFX00(ha))
2161 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 2311 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2162 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 2312 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2313 else if (IS_QLA27XX(ha))
2314 speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
2315 FC_PORTSPEED_8GBIT;
2163 else 2316 else
2164 speed = FC_PORTSPEED_1GBIT; 2317 speed = FC_PORTSPEED_1GBIT;
2165 fc_host_supported_speeds(vha->host) = speed; 2318 fc_host_supported_speeds(vha->host) = speed;
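
The new fw_dump_template reader above leans on memory_read_from_buffer(), which does the offset and length clamping that a sysfs binary read needs. A minimal self-contained sketch of that idiom (example_buf and example_read are illustrative names):

    #include <linux/fs.h>       /* memory_read_from_buffer() */
    #include <linux/sysfs.h>

    static char example_buf[256];

    static ssize_t example_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *bin_attr,
                                char *buf, loff_t off, size_t count)
    {
        /* copies min(count, sizeof(example_buf) - off) bytes,
         * advances off, and returns the byte count (0 at EOF) */
        return memory_read_from_buffer(buf, count, &off,
                                       example_buf, sizeof(example_buf));
    }
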
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e6b7ee..71ff340f6de4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1437 if (ha->flags.nic_core_reset_hdlr_active) 1437 if (ha->flags.nic_core_reset_hdlr_active)
1438 return -EBUSY; 1438 return -EBUSY;
1439 1439
1440 mutex_lock(&ha->optrom_mutex);
1440 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1441 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1441 if (rval) 1442 if (rval) {
1443 mutex_unlock(&ha->optrom_mutex);
1442 return rval; 1444 return rval;
1445 }
1443 1446
1444 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1447 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1445 ha->optrom_region_start, ha->optrom_region_size); 1448 ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1453 vfree(ha->optrom_buffer); 1456 vfree(ha->optrom_buffer);
1454 ha->optrom_buffer = NULL; 1457 ha->optrom_buffer = NULL;
1455 ha->optrom_state = QLA_SWAITING; 1458 ha->optrom_state = QLA_SWAITING;
1459 mutex_unlock(&ha->optrom_mutex);
1456 bsg_job->job_done(bsg_job); 1460 bsg_job->job_done(bsg_job);
1457 return rval; 1461 return rval;
1458} 1462}
@@ -1465,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1465 struct qla_hw_data *ha = vha->hw; 1469 struct qla_hw_data *ha = vha->hw;
1466 int rval = 0; 1470 int rval = 0;
1467 1471
1472 mutex_lock(&ha->optrom_mutex);
1468 rval = qla2x00_optrom_setup(bsg_job, vha, 1); 1473 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1469 if (rval) 1474 if (rval) {
1475 mutex_unlock(&ha->optrom_mutex);
1470 return rval; 1476 return rval;
1477 }
1471 1478
1472 /* Set the isp82xx_no_md_cap not to capture minidump */ 1479 /* Set the isp82xx_no_md_cap not to capture minidump */
1473 ha->flags.isp82xx_no_md_cap = 1; 1480 ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1483 vfree(ha->optrom_buffer); 1490 vfree(ha->optrom_buffer);
1484 ha->optrom_buffer = NULL; 1491 ha->optrom_buffer = NULL;
1485 ha->optrom_state = QLA_SWAITING; 1492 ha->optrom_state = QLA_SWAITING;
1493 mutex_unlock(&ha->optrom_mutex);
1486 bsg_job->job_done(bsg_job); 1494 bsg_job->job_done(bsg_job);
1487 return rval; 1495 return rval;
1488} 1496}
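
Both bsg entry points now hold ha->optrom_mutex from qla2x00_optrom_setup() through buffer teardown, matching the sysfs optrom paths above. The recurring shape is lock early and funnel every failure to a single unlock; a generic sketch (the example_ names are placeholders, not driver functions):

    #include <linux/mutex.h>

    static int example_setup(void) { return 0; }  /* stub */
    static void example_work(void) { }            /* stub */

    static int example_update(struct mutex *m)
    {
        int rval;

        mutex_lock(m);
        rval = example_setup();
        if (rval)
            goto out;            /* error paths still unlock below */

        example_work();
    out:
        mutex_unlock(m);         /* one exit, one unlock */
        return rval;
    }
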
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f553bb1..97255f7c3975 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x015b | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
15 * | | | 0x0x015a | 15 * | | | 0x0144,0x0146 |
16 * | Mailbox commands | 0x1187 | 0x111a-0x111b | 16 * | | | 0x015b-0x0160 |
17 * | | | 0x1155-0x1158 | 17 * | | | 0x016e-0x0170 |
18 * | | | 0x1018-0x1019 | 18 * | Mailbox commands | 0x1187 | 0x1018-0x1019 |
19 * | | | 0x10ca |
19 * | | | 0x1115-0x1116 | 20 * | | | 0x1115-0x1116 |
20 * | | | 0x10ca | 21 * | | | 0x111a-0x111b |
22 * | | | 0x1155-0x1158 |
21 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 23 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
22 * | | | 0x2011-0x2012, | 24 * | | | 0x2011-0x2012, |
23 * | | | 0x2016 | 25 * | | | 0x2016 |
@@ -32,18 +34,17 @@
32 * | | | 0x5047,0x5052 | 34 * | | | 0x5047,0x5052 |
33 * | | | 0x5084,0x5075 | 35 * | | | 0x5084,0x5075 |
34 * | | | 0x503d,0x5044 | 36 * | | | 0x503d,0x5044 |
37 * | | | 0x507b |
35 * | Timer Routines | 0x6012 | | 38 * | Timer Routines | 0x6012 | |
36 * | User Space Interactions | 0x70e1 | 0x7018,0x702e, | 39 * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
37 * | | | 0x7020,0x7024, | 40 * | | | 0x7020,0x7024 |
38 * | | | 0x7039,0x7045, | 41 * | | | 0x7039,0x7045 |
39 * | | | 0x7073-0x7075, | 42 * | | | 0x7073-0x7075 |
40 * | | | 0x707b,0x708c, | 43 * | | | 0x70a5-0x70a6 |
41 * | | | 0x70a5,0x70a6, | 44 * | | | 0x70a8,0x70ab |
42 * | | | 0x70a8,0x70ab, | 45 * | | | 0x70ad-0x70ae |
43 * | | | 0x70ad-0x70ae, | 46 * | | | 0x70d7-0x70db |
44 * | | | 0x70d1-0x70db, | 47 * | | | 0x70de-0x70df |
45 * | | | 0x7047,0x703b |
46 * | | | 0x70de-0x70df, |
47 * | Task Management | 0x803d | 0x8025-0x8026 | 48 * | Task Management | 0x803d | 0x8025-0x8026 |
48 * | | | 0x800b,0x8039 | 49 * | | | 0x800b,0x8039 |
49 * | AER/EEH | 0x9011 | | 50 * | AER/EEH | 0x9011 | |
@@ -59,7 +60,11 @@
59 * | | | 0xb13c-0xb140 | 60 * | | | 0xb13c-0xb140 |
60 * | | | 0xb149 | 61 * | | | 0xb149 |
61 * | MultiQ | 0xc00c | | 62 * | MultiQ | 0xc00c | |
62 * | Misc | 0xd010 | | 63 * | Misc | 0xd2ff | 0xd017-0xd019 |
64 * | | | 0xd020 |
65 * | | | 0xd02e-0xd0ff |
66 * | | | 0xd101-0xd1fe |
67 * | | | 0xd212-0xd2fe |
63 * | Target Mode | 0xe070 | 0xe021 | 68 * | Target Mode | 0xe070 | 0xe021 |
64 * | Target Mode Management | 0xf072 | 0xf002-0xf003 | 69 * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
65 * | | | 0xf046-0xf049 | 70 * | | | 0xf046-0xf049 |
@@ -104,7 +109,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
104 return ptr + (rsp->length * sizeof(response_t)); 109 return ptr + (rsp->length * sizeof(response_t));
105} 110}
106 111
107static int 112int
113qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
114 uint32_t ram_dwords, void **nxt)
115{
116 int rval;
117 uint32_t cnt, stat, timer, dwords, idx;
118 uint16_t mb0, mb1;
119 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
120 dma_addr_t dump_dma = ha->gid_list_dma;
121 uint32_t *dump = (uint32_t *)ha->gid_list;
122
123 rval = QLA_SUCCESS;
124 mb0 = 0;
125
126 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
127 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
128
129 dwords = qla2x00_gid_list_size(ha) / 4;
130 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
131 cnt += dwords, addr += dwords) {
132 if (cnt + dwords > ram_dwords)
133 dwords = ram_dwords - cnt;
134
135 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
136 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
137
138 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
139 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
140 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
141 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
142
143 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
144 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
145
146 WRT_REG_WORD(&reg->mailbox9, 0);
147 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
148
149 ha->flags.mbox_int = 0;
150 for (timer = 6000000; timer; timer--) {
151 /* Check for pending interrupts. */
152 stat = RD_REG_DWORD(&reg->host_status);
153 if (stat & HSRX_RISC_INT) {
154 stat &= 0xff;
155
156 if (stat == 0x1 || stat == 0x2 ||
157 stat == 0x10 || stat == 0x11) {
158 set_bit(MBX_INTERRUPT,
159 &ha->mbx_cmd_flags);
160
161 mb0 = RD_REG_WORD(&reg->mailbox0);
162 mb1 = RD_REG_WORD(&reg->mailbox1);
163
164 WRT_REG_DWORD(&reg->hccr,
165 HCCRX_CLR_RISC_INT);
166 RD_REG_DWORD(&reg->hccr);
167 break;
168 }
169
170 /* Clear this intr; it wasn't a mailbox intr */
171 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
172 RD_REG_DWORD(&reg->hccr);
173 }
174 udelay(5);
175 }
176 ha->flags.mbox_int = 1;
177
178 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
179 rval = mb0 & MBS_MASK;
180 for (idx = 0; idx < dwords; idx++)
181 ram[cnt + idx] = IS_QLA27XX(ha) ?
182 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
183 } else {
184 rval = QLA_FUNCTION_FAILED;
185 }
186 }
187
188 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
189 return rval;
190}
191
192int
108qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, 193qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
109 uint32_t ram_dwords, void **nxt) 194 uint32_t ram_dwords, void **nxt)
110{ 195{
@@ -139,6 +224,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
139 WRT_REG_WORD(&reg->mailbox5, LSW(dwords)); 224 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
140 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); 225 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
141 226
227 ha->flags.mbox_int = 0;
142 for (timer = 6000000; timer; timer--) { 228 for (timer = 6000000; timer; timer--) {
143 /* Check for pending interrupts. */ 229 /* Check for pending interrupts. */
144 stat = RD_REG_DWORD(&reg->host_status); 230 stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
164 } 250 }
165 udelay(5); 251 udelay(5);
166 } 252 }
253 ha->flags.mbox_int = 1;
167 254
168 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 255 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
169 rval = mb0 & MBS_MASK; 256 rval = mb0 & MBS_MASK;
170 for (idx = 0; idx < dwords; idx++) 257 for (idx = 0; idx < dwords; idx++)
171 ram[cnt + idx] = swab32(dump[idx]); 258 ram[cnt + idx] = IS_QLA27XX(ha) ?
259 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
172 } else { 260 } else {
173 rval = QLA_FUNCTION_FAILED; 261 rval = QLA_FUNCTION_FAILED;
174 } 262 }
@@ -208,7 +296,7 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
208 return buf; 296 return buf;
209} 297}
210 298
211static inline int 299int
212qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) 300qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
213{ 301{
214 int rval = QLA_SUCCESS; 302 int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
227 return rval; 315 return rval;
228} 316}
229 317
230static int 318int
231qla24xx_soft_reset(struct qla_hw_data *ha) 319qla24xx_soft_reset(struct qla_hw_data *ha)
232{ 320{
233 int rval = QLA_SUCCESS; 321 int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
537 struct qla2xxx_mq_chain *mq = ptr; 625 struct qla2xxx_mq_chain *mq = ptr;
538 device_reg_t __iomem *reg; 626 device_reg_t __iomem *reg;
539 627
540 if (!ha->mqenable || IS_QLA83XX(ha)) 628 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
541 return ptr; 629 return ptr;
542 630
543 mq = ptr; 631 mq = ptr;
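
The mailbox polling loops above (both the new qla27xx_dump_mpi_ram() and the reworked qla24xx_dump_ram()) bound their wait by iteration count rather than jiffies: 6,000,000 passes with udelay(5) each gives a worst case of 6,000,000 x 5 us = 30 s per chunk. A trivial check of that arithmetic:

    /* worst-case wait bound for the dump polling loop */
    #include <stdio.h>

    int main(void)
    {
        unsigned long iters = 6000000;  /* for (timer = 6000000; ...) */
        unsigned long us_each = 5;      /* udelay(5) per pass */

        printf("max wait: %lu s\n", iters * us_each / 1000000UL);
        return 0;                       /* prints "max wait: 30 s" */
    }
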
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4f8b6c..cc961040f8b1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
348#define ql_dbg_tgt 0x00004000 /* Target mode */ 348#define ql_dbg_tgt 0x00004000 /* Target mode */
349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ 349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ 350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
351
352extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
353 uint32_t, void **);
354extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
355 uint32_t, void **);
356extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
357extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 266724b6b899..6a106136716c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@ typedef union {
654 struct device_reg_25xxmq isp25mq; 654 struct device_reg_25xxmq isp25mq;
655 struct device_reg_82xx isp82; 655 struct device_reg_82xx isp82;
656 struct device_reg_fx00 ispfx00; 656 struct device_reg_fx00 ispfx00;
657} device_reg_t; 657} __iomem device_reg_t;
658 658
659#define ISP_REQ_Q_IN(ha, reg) \ 659#define ISP_REQ_Q_IN(ha, reg) \
660 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ 660 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@ struct mbx_cmd_32 {
808 Notification */ 808 Notification */
809#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ 809#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
810#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */ 810#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
811 811#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */
812/* 83XX FCoE specific */ 812/* 83XX FCoE specific */
813#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ 813#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
814 814
@@ -938,6 +938,7 @@ struct mbx_cmd_32 {
938 */ 938 */
939#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */ 939#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
940#define MBC_READ_SERDES 0x4 /* Read serdes word. */ 940#define MBC_READ_SERDES 0x4 /* Read serdes word. */
941#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */
941#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ 942#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
942#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ 943#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
943#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */ 944#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@ typedef struct {
1197 uint8_t reserved_3[26]; 1198 uint8_t reserved_3[26];
1198} init_cb_t; 1199} init_cb_t;
1199 1200
1200
1201struct init_cb_fx {
1202 uint16_t version;
1203 uint16_t reserved_1[13];
1204 __le16 request_q_outpointer;
1205 __le16 response_q_inpointer;
1206 uint16_t reserved_2[2];
1207 __le16 response_q_length;
1208 __le16 request_q_length;
1209 uint16_t reserved_3[2];
1210 __le32 request_q_address[2];
1211 __le32 response_q_address[2];
1212 uint16_t reserved_4[4];
1213 uint8_t response_q_msivec;
1214 uint8_t reserved_5[19];
1215 uint16_t interrupt_delay_timer;
1216 uint16_t reserved_6;
1217 uint32_t fwoptions1;
1218 uint32_t fwoptions2;
1219 uint32_t fwoptions3;
1220 uint8_t reserved_7[24];
1221};
1222
1223
1224/* 1201/*
1225 * Get Link Status mailbox command return buffer. 1202 * Get Link Status mailbox command return buffer.
1226 */ 1203 */
@@ -2172,6 +2149,7 @@ struct ct_fdmi_hba_attributes {
2172#define FDMI_PORT_SPEED_4GB 0x8 2149#define FDMI_PORT_SPEED_4GB 0x8
2173#define FDMI_PORT_SPEED_8GB 0x10 2150#define FDMI_PORT_SPEED_8GB 0x10
2174#define FDMI_PORT_SPEED_16GB 0x20 2151#define FDMI_PORT_SPEED_16GB 0x20
2152#define FDMI_PORT_SPEED_32GB 0x40
2175#define FDMI_PORT_SPEED_UNKNOWN 0x8000 2153#define FDMI_PORT_SPEED_UNKNOWN 0x8000
2176 2154
2177struct ct_fdmi_port_attr { 2155struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@ struct bidi_statistics {
2680#define QLA_MQ_SIZE 32 2658#define QLA_MQ_SIZE 32
2681#define QLA_MAX_QUEUES 256 2659#define QLA_MAX_QUEUES 256
2682#define ISP_QUE_REG(ha, id) \ 2660#define ISP_QUE_REG(ha, id) \
2683 ((ha->mqenable || IS_QLA83XX(ha)) ? \ 2661 ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
2684 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ 2662 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
2685 ((void __iomem *)ha->iobase)) 2663 ((void __iomem *)ha->iobase))
2686#define QLA_REQ_QUE_ID(tag) \ 2664#define QLA_REQ_QUE_ID(tag) \
@@ -2818,7 +2796,6 @@ struct qla_hw_data {
2818 uint32_t fac_supported :1; 2796 uint32_t fac_supported :1;
2819 2797
2820 uint32_t chip_reset_done :1; 2798 uint32_t chip_reset_done :1;
2821 uint32_t port0 :1;
2822 uint32_t running_gold_fw :1; 2799 uint32_t running_gold_fw :1;
2823 uint32_t eeh_busy :1; 2800 uint32_t eeh_busy :1;
2824 uint32_t cpu_affinity_enabled :1; 2801 uint32_t cpu_affinity_enabled :1;
@@ -2849,7 +2826,7 @@ struct qla_hw_data {
2849 spinlock_t hardware_lock ____cacheline_aligned; 2826 spinlock_t hardware_lock ____cacheline_aligned;
2850 int bars; 2827 int bars;
2851 int mem_only; 2828 int mem_only;
2852 device_reg_t __iomem *iobase; /* Base I/O address */ 2829 device_reg_t *iobase; /* Base I/O address */
2853 resource_size_t pio_address; 2830 resource_size_t pio_address;
2854 2831
2855#define MIN_IOBASE_LEN 0x100 2832#define MIN_IOBASE_LEN 0x100
@@ -2868,8 +2845,8 @@ struct qla_hw_data {
2868 uint32_t rsp_que_off; 2845 uint32_t rsp_que_off;
2869 2846
2870 /* Multi queue data structs */ 2847 /* Multi queue data structs */
2871 device_reg_t __iomem *mqiobase; 2848 device_reg_t *mqiobase;
2872 device_reg_t __iomem *msixbase; 2849 device_reg_t *msixbase;
2873 uint16_t msix_count; 2850 uint16_t msix_count;
2874 uint8_t mqenable; 2851 uint8_t mqenable;
2875 struct req_que **req_q_map; 2852 struct req_que **req_q_map;
@@ -2905,6 +2882,7 @@ struct qla_hw_data {
2905#define PORT_SPEED_4GB 0x03 2882#define PORT_SPEED_4GB 0x03
2906#define PORT_SPEED_8GB 0x04 2883#define PORT_SPEED_8GB 0x04
2907#define PORT_SPEED_16GB 0x05 2884#define PORT_SPEED_16GB 0x05
2885#define PORT_SPEED_32GB 0x06
2908#define PORT_SPEED_10GB 0x13 2886#define PORT_SPEED_10GB 0x13
2909 uint16_t link_data_rate; /* F/W operating speed */ 2887 uint16_t link_data_rate; /* F/W operating speed */
2910 2888
@@ -2928,6 +2906,7 @@ struct qla_hw_data {
2928#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 2906#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
2929#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 2907#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
2930#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 2908#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
2909#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
2931 uint32_t device_type; 2910 uint32_t device_type;
2932#define DT_ISP2100 BIT_0 2911#define DT_ISP2100 BIT_0
2933#define DT_ISP2200 BIT_1 2912#define DT_ISP2200 BIT_1
@@ -2948,7 +2927,8 @@ struct qla_hw_data {
2948#define DT_ISP8031 BIT_16 2927#define DT_ISP8031 BIT_16
2949#define DT_ISPFX00 BIT_17 2928#define DT_ISPFX00 BIT_17
2950#define DT_ISP8044 BIT_18 2929#define DT_ISP8044 BIT_18
2951#define DT_ISP_LAST (DT_ISP8044 << 1) 2930#define DT_ISP2071 BIT_19
2931#define DT_ISP_LAST (DT_ISP2071 << 1)
2952 2932
2953#define DT_T10_PI BIT_25 2933#define DT_T10_PI BIT_25
2954#define DT_IIDMA BIT_26 2934#define DT_IIDMA BIT_26
@@ -2978,6 +2958,7 @@ struct qla_hw_data {
2978#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) 2958#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
2979#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) 2959#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
2980#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) 2960#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
2961#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
2981 2962
2982#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2963#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2983 IS_QLA6312(ha) || IS_QLA6322(ha)) 2964 IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2986,6 +2967,7 @@ struct qla_hw_data {
2986#define IS_QLA25XX(ha) (IS_QLA2532(ha)) 2967#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2987#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) 2968#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
2988#define IS_QLA84XX(ha) (IS_QLA8432(ha)) 2969#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2970#define IS_QLA27XX(ha) (IS_QLA2071(ha))
2989#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ 2971#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2990 IS_QLA84XX(ha)) 2972 IS_QLA84XX(ha))
2991#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ 2973#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2994,11 +2976,13 @@ struct qla_hw_data {
2994#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2976#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2995 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ 2977 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2996 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ 2978 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
2997 IS_QLA8044(ha)) 2979 IS_QLA8044(ha) || IS_QLA27XX(ha))
2998#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2980#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2999#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) 2981#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
3000#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2982#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
3001#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2983 IS_QLA27XX(ha))
2984#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
2985 IS_QLA27XX(ha))
3002#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2986#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
3003 2987
3004#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) 2988#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -3008,7 +2992,8 @@ struct qla_hw_data {
3008#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) 2992#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
3009#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2993#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
3010#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) 2994#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
3011#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) 2995#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
2996 IS_QLA27XX(ha))
3012#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha))) 2997#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
3013/* Bit 21 of fw_attributes decides the MCTP capabilities */ 2998/* Bit 21 of fw_attributes decides the MCTP capabilities */
3014#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 2999#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
@@ -3133,6 +3118,9 @@ struct qla_hw_data {
3133 uint16_t fw_xcb_count; 3118 uint16_t fw_xcb_count;
3134 uint16_t fw_iocb_count; 3119 uint16_t fw_iocb_count;
3135 3120
3121 uint32_t fw_shared_ram_start;
3122 uint32_t fw_shared_ram_end;
3123
3136 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ 3124 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
3137 uint8_t fw_seriallink_options[4]; 3125 uint8_t fw_seriallink_options[4];
3138 uint16_t fw_seriallink_options24[4]; 3126 uint16_t fw_seriallink_options24[4];
@@ -3141,6 +3129,9 @@ struct qla_hw_data {
3141 uint32_t mpi_capabilities; 3129 uint32_t mpi_capabilities;
3142 uint8_t phy_version[3]; 3130 uint8_t phy_version[3];
3143 3131
3132 /* Firmware dump template */
3133 void *fw_dump_template;
3134 uint32_t fw_dump_template_len;
3144 /* Firmware dump information. */ 3135 /* Firmware dump information. */
3145 struct qla2xxx_fw_dump *fw_dump; 3136 struct qla2xxx_fw_dump *fw_dump;
3146 uint32_t fw_dump_len; 3137 uint32_t fw_dump_len;
@@ -3183,6 +3174,7 @@ struct qla_hw_data {
3183#define QLA_SWRITING 2 3174#define QLA_SWRITING 2
3184 uint32_t optrom_region_start; 3175 uint32_t optrom_region_start;
3185 uint32_t optrom_region_size; 3176 uint32_t optrom_region_size;
3177 struct mutex optrom_mutex;
3186 3178
3187/* PCI expansion ROM image information. */ 3179/* PCI expansion ROM image information. */
3188#define ROM_CODE_TYPE_BIOS 0 3180#define ROM_CODE_TYPE_BIOS 0
@@ -3309,6 +3301,7 @@ struct qla_hw_data {
3309 struct mr_data_fx00 mr; 3301 struct mr_data_fx00 mr;
3310 3302
3311 struct qlt_hw_data tgt; 3303 struct qlt_hw_data tgt;
3304 int allow_cna_fw_dump;
3312}; 3305};
3313 3306
3314/* 3307/*
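
The qla_def.h hunks extend the driver's one-bit-per-ISP device_type scheme: DT_ISP2071 takes BIT_19, DT_ISP_LAST moves up past it, and IS_QLA27XX() is defined as the union of its (currently single) member, with the real macros masking device_type through DT_MASK() first so that feature bits such as DT_T10_PI never affect the test. A minimal sketch of the idiom with illustrative EX_ names:

    #define EX_BIT(n)          (1u << (n))
    #define EX_DT_ISP2071      EX_BIT(19)            /* one bit per ISP type */
    #define EX_DT_LAST         (EX_DT_ISP2071 << 1)  /* first unused bit */

    #define EX_IS_ISP2071(dt)  ((dt) & EX_DT_ISP2071)
    /* family predicates are just the OR of member predicates,
     * so adding a chip to a family touches a single line */
    #define EX_IS_27XX(dt)     (EX_IS_ISP2071(dt))
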
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a29294b62..32ab80957688 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
114{ 114{
115 struct qla_hw_data *ha = vha->hw; 115 struct qla_hw_data *ha = vha->hw;
116 116
117 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 117 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
118 !IS_QLA27XX(ha))
118 goto out; 119 goto out;
119 if (!ha->fce) 120 if (!ha->fce)
120 goto out; 121 goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa905a0..3a7353eaccbd 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@ struct qla_flt_header {
1378#define FLT_REG_NVRAM_0 0x15 1378#define FLT_REG_NVRAM_0 0x15
1379#define FLT_REG_VPD_1 0x16 1379#define FLT_REG_VPD_1 0x16
1380#define FLT_REG_NVRAM_1 0x17 1380#define FLT_REG_NVRAM_1 0x17
1381#define FLT_REG_VPD_2 0xD4
1382#define FLT_REG_NVRAM_2 0xD5
1383#define FLT_REG_VPD_3 0xD6
1384#define FLT_REG_NVRAM_3 0xD7
1381#define FLT_REG_FDT 0x1a 1385#define FLT_REG_FDT 0x1a
1382#define FLT_REG_FLT 0x1c 1386#define FLT_REG_FLT 0x1c
1383#define FLT_REG_HW_EVENT_0 0x1d 1387#define FLT_REG_HW_EVENT_0 0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f426628a0a5..e665e8109933 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
330 dma_addr_t); 330 dma_addr_t);
331 331
332extern int qla24xx_abort_command(srb_t *); 332extern int qla24xx_abort_command(srb_t *);
333extern int qla24xx_async_abort_command(srb_t *);
333extern int 334extern int
334qla24xx_abort_target(struct fc_port *, unsigned int, int); 335qla24xx_abort_target(struct fc_port *, unsigned int, int);
335extern int 336extern int
@@ -511,6 +512,16 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int);
511extern void qla24xx_fw_dump(scsi_qla_host_t *, int); 512extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
512extern void qla25xx_fw_dump(scsi_qla_host_t *, int); 513extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
513extern void qla81xx_fw_dump(scsi_qla_host_t *, int); 514extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
515extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
516extern void qla8044_fw_dump(scsi_qla_host_t *, int);
517
518extern void qla27xx_fwdump(scsi_qla_host_t *, int);
519extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
520extern int qla27xx_fwdt_template_valid(void *);
521extern ulong qla27xx_fwdt_template_size(void *);
522extern const void *qla27xx_fwdt_template_default(void);
523extern ulong qla27xx_fwdt_template_default_size(void);
524
514extern void qla2x00_dump_regs(scsi_qla_host_t *); 525extern void qla2x00_dump_regs(scsi_qla_host_t *);
515extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 526extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
516extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 527extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
 extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_command(srb_t *);
 extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
 extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
 extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b32d9a..e377f9d2f92a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
 	else if (IS_QLA25XX(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
 		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
 	default:
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
 		case BIT_10:
 			list[i].fp_speed = PORT_SPEED_16GB;
 			break;
+		case BIT_8:
+			list[i].fp_speed = PORT_SPEED_32GB;
+			break;
 		}
 
 		ql_dbg(ql_dbg_disc, vha, 0x205b,
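
The three qla_gs.c hunks above extend the same idea to 32 Gb/s: FDMI advertises the supported and current speed masks, and GPSC reports the negotiated speed as a single bit that the driver folds into a PORT_SPEED_* value. A minimal standalone sketch of that GPSC-style lookup follows; the BIT_* assignments and the labels are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

/* Illustrative GPSC speed bits (stand-ins for the driver's macros). */
#define BIT_15 (1u << 15)	/* 1 Gb/s  */
#define BIT_14 (1u << 14)	/* 2 Gb/s  */
#define BIT_13 (1u << 13)	/* 4 Gb/s  */
#define BIT_11 (1u << 11)	/* 8 Gb/s  */
#define BIT_10 (1u << 10)	/* 16 Gb/s */
#define BIT_8  (1u << 8)	/* 32 Gb/s, the case added above */

static const char *gpsc_speed_str(unsigned int bits)
{
	switch (bits) {
	case BIT_15: return "1Gb";
	case BIT_14: return "2Gb";
	case BIT_13: return "4Gb";
	case BIT_11: return "8Gb";
	case BIT_10: return "16Gb";
	case BIT_8:  return "32Gb";
	default:     return "unknown";
	}
}

int main(void)
{
	printf("%s\n", gpsc_speed_str(BIT_8));	/* prints 32Gb */
	return 0;
}
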
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4facf7f..38aeb54cd9d8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@ done:
 }
 
 static void
-qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
+qla2x00_tmf_iocb_timeout(void *data)
 {
-	srb_t *sp = (srb_t *)ptr;
-	struct srb_iocb *iocb = &sp->u.iocb_cmd;
-	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
-	uint32_t flags;
-	uint16_t lun;
-	int rval;
-
-	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
-		flags = iocb->u.tmf.flags;
-		lun = (uint16_t)iocb->u.tmf.lun;
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
 
-		/* Issue Marker IOCB */
-		rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
-		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
-		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	tmf->u.tmf.comp_status = CS_TIMEOUT;
+	complete(&tmf->u.tmf.comp);
+}
 
-		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
-			ql_dbg(ql_dbg_taskm, vha, 0x8030,
-			    "TM IOCB failed (%x).\n", rval);
-		}
-	}
-	sp->free(sp->fcport->vha, sp);
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+	complete(&tmf->u.tmf.comp);
 }
 
 int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
     uint32_t tag)
 {
 	struct scsi_qla_host *vha = fcport->vha;
+	struct srb_iocb *tm_iocb;
 	srb_t *sp;
-	struct srb_iocb *tcf;
-	int rval;
+	int rval = QLA_FUNCTION_FAILED;
 
-	rval = QLA_FUNCTION_FAILED;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
+	tm_iocb = &sp->u.iocb_cmd;
 	sp->type = SRB_TM_CMD;
 	sp->name = "tmf";
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
-	tcf = &sp->u.iocb_cmd;
-	tcf->u.tmf.flags = tm_flags;
-	tcf->u.tmf.lun = lun;
-	tcf->u.tmf.data = tag;
-	tcf->timeout = qla2x00_async_iocb_timeout;
-	sp->done = qla2x00_async_tm_cmd_done;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	tm_iocb->u.tmf.flags = flags;
+	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.data = tag;
+	sp->done = qla2x00_tmf_sp_done;
+	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+	init_completion(&tm_iocb->u.tmf.comp);
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
330 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 320 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
331 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 321 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
332 fcport->d_id.b.area, fcport->d_id.b.al_pa); 322 fcport->d_id.b.area, fcport->d_id.b.al_pa);
323
324 wait_for_completion(&tm_iocb->u.tmf.comp);
325
326 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
327 QLA_SUCCESS : QLA_FUNCTION_FAILED;
328
329 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
330 ql_dbg(ql_dbg_taskm, vha, 0x8030,
331 "TM IOCB failed (%x).\n", rval);
332 }
333
334 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
335 flags = tm_iocb->u.tmf.flags;
336 lun = (uint16_t)tm_iocb->u.tmf.lun;
337
338 /* Issue Marker IOCB */
339 qla2x00_marker(vha, vha->hw->req_q_map[0],
340 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
341 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
342 }
343
344done_free_sp:
345 sp->free(vha, sp);
346done:
333 return rval; 347 return rval;
348}
349
350static void
351qla24xx_abort_iocb_timeout(void *data)
352{
353 srb_t *sp = (srb_t *)data;
354 struct srb_iocb *abt = &sp->u.iocb_cmd;
355
356 abt->u.abt.comp_status = CS_TIMEOUT;
357 complete(&abt->u.abt.comp);
358}
359
360static void
361qla24xx_abort_sp_done(void *data, void *ptr, int res)
362{
363 srb_t *sp = (srb_t *)ptr;
364 struct srb_iocb *abt = &sp->u.iocb_cmd;
365
366 complete(&abt->u.abt.comp);
367}
368
369static int
370qla24xx_async_abort_cmd(srb_t *cmd_sp)
371{
372 scsi_qla_host_t *vha = cmd_sp->fcport->vha;
373 fc_port_t *fcport = cmd_sp->fcport;
374 struct srb_iocb *abt_iocb;
375 srb_t *sp;
376 int rval = QLA_FUNCTION_FAILED;
377
378 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
379 if (!sp)
380 goto done;
381
382 abt_iocb = &sp->u.iocb_cmd;
383 sp->type = SRB_ABT_CMD;
384 sp->name = "abort";
385 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
386 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
387 sp->done = qla24xx_abort_sp_done;
388 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
389 init_completion(&abt_iocb->u.abt.comp);
390
391 rval = qla2x00_start_sp(sp);
392 if (rval != QLA_SUCCESS)
393 goto done_free_sp;
394
395 ql_dbg(ql_dbg_async, vha, 0x507c,
396 "Abort command issued - hdl=%x, target_id=%x\n",
397 cmd_sp->handle, fcport->tgt_id);
398
399 wait_for_completion(&abt_iocb->u.abt.comp);
400
401 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
402 QLA_SUCCESS : QLA_FUNCTION_FAILED;
334 403
335done_free_sp: 404done_free_sp:
336 sp->free(fcport->vha, sp); 405 sp->free(vha, sp);
337done: 406done:
338 return rval; 407 return rval;
339} 408}
340 409
410int
411qla24xx_async_abort_command(srb_t *sp)
412{
413 unsigned long flags = 0;
414
415 uint32_t handle;
416 fc_port_t *fcport = sp->fcport;
417 struct scsi_qla_host *vha = fcport->vha;
418 struct qla_hw_data *ha = vha->hw;
419 struct req_que *req = vha->req;
420
421 spin_lock_irqsave(&ha->hardware_lock, flags);
422 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
423 if (req->outstanding_cmds[handle] == sp)
424 break;
425 }
426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
427 if (handle == req->num_outstanding_cmds) {
428 /* Command not found. */
429 return QLA_FUNCTION_FAILED;
430 }
431 if (sp->type == SRB_FXIOCB_DCMD)
432 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
433 FXDISC_ABORT_IOCTL);
434
435 return qla24xx_async_abort_cmd(sp);
436}
437
341void 438void
342qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, 439qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
343 uint16_t *data) 440 uint16_t *data)
@@ -1379,7 +1476,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 	}
 
 	ha->fw_dumped = 0;
-	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	req_q_size = rsp_q_size = 0;
+
+	if (IS_QLA27XX(ha))
+		goto try_fce;
+
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		fixed_size = sizeof(struct qla2100_fw_dump);
 	} else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
 		else
 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 		if (ha->mqenable) {
@@ -1412,9 +1515,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 		if (ha->tgt.atio_ring)
 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha))
 			goto try_eft;
 
+try_fce:
+	if (ha->fce)
+		dma_free_coherent(&ha->pdev->dev,
+		    FCE_SIZE, ha->fce, ha->fce_dma);
+
+	/* Allocate memory for Fibre Channel Event Buffer. */
 	tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
 	    GFP_KERNEL);
 	if (!tc) {
@@ -1442,7 +1552,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 	ha->flags.fce_enabled = 1;
 	ha->fce_dma = tc_dma;
 	ha->fce = tc;
+
 try_eft:
+	if (ha->eft)
+		dma_free_coherent(&ha->pdev->dev,
+		    EFT_SIZE, ha->eft, ha->eft_dma);
+
 	/* Allocate memory for Extended Trace Buffer. */
 	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
 	    GFP_KERNEL);
@@ -1469,15 +1584,28 @@ try_eft:
 		ha->eft_dma = tc_dma;
 		ha->eft = tc;
 	}
+
 cont_alloc:
+	if (IS_QLA27XX(ha)) {
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x00ba,
+			    "Failed missing fwdump template\n");
+			return;
+		}
+		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_init, vha, 0x00fa,
+		    "-> allocating fwdump (%x bytes)...\n", dump_size);
+		goto allocate;
+	}
+
 	req_q_size = req->length * sizeof(request_t);
 	rsp_q_size = rsp->length * sizeof(response_t);
-
 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
 	ha->chain_offset = dump_size;
 	dump_size += mq_size + fce_size;
 
+allocate:
 	ha->fw_dump = vmalloc(dump_size);
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@ cont_alloc:
 		}
 		return;
 	}
+	ha->fw_dump_len = dump_size;
 	ql_dbg(ql_dbg_init, vha, 0x00c5,
 	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
 
-	ha->fw_dump_len = dump_size;
+	if (IS_QLA27XX(ha))
+		return;
+
 	ha->fw_dump->signature[0] = 'Q';
 	ha->fw_dump->signature[1] = 'L';
 	ha->fw_dump->signature[2] = 'G';
@@ -1718,9 +1849,6 @@ enable_82xx_npiv:
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
-	if (IS_QLA83XX(ha))
-		goto skip_fac_check;
-
 	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
 
@@ -1733,8 +1861,8 @@ enable_82xx_npiv:
1733 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1861 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1734 ha->fw_major_version, ha->fw_minor_version, 1862 ha->fw_major_version, ha->fw_minor_version,
1735 ha->fw_subminor_version); 1863 ha->fw_subminor_version);
1736skip_fac_check: 1864
1737 if (IS_QLA83XX(ha)) { 1865 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
1738 ha->flags.fac_supported = 0; 1866 ha->flags.fac_supported = 0;
1739 rval = QLA_SUCCESS; 1867 rval = QLA_SUCCESS;
1740 } 1868 }
@@ -1933,7 +2061,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 		icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
 		icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
 
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
 	nv = ha->nvram;
 
 	/* Determine NVRAM starting address. */
-	if (ha->flags.port0) {
+	if (ha->port_no == 0) {
 		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
 	} else {
 		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
 	}
+
 	ha->nvram_size = sizeof(struct nvram_24xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
@@ -4842,7 +4971,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
 	nv->exchange_count = __constant_cpu_to_le16(0);
 	nv->hard_address = __constant_cpu_to_le16(124);
 	nv->port_name[0] = 0x21;
-	nv->port_name[1] = 0x00 + ha->port_no;
+	nv->port_name[1] = 0x00 + ha->port_no + 1;
 	nv->port_name[2] = 0x00;
 	nv->port_name[3] = 0xe0;
 	nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 		segments--;
 	}
 
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x0161,
+	    "Loading fwdump template from %x\n", faddr);
+	qla24xx_read_flash_data(vha, dcode, faddr, 7);
+	risc_size = be32_to_cpu(dcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x0162,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0163,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0164,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	faddr += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0165,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0166,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*dcode)) {
+		ql_log(ql_log_warn, vha, 0x0167,
+		    "Failed fwdump template exceeds array by %x bytes\n",
+		    (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+default_template:
+	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0169,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x016a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*dcode);
+	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x016b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x016c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
 	return rval;
 }
 
@@ -5231,7 +5453,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 	uint32_t risc_size;
 	uint32_t i;
 	struct fw_blob *blob;
-	uint32_t *fwcode, fwclen;
+	const uint32_t *fwcode;
+	uint32_t fwclen;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 
@@ -5263,7 +5486,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		ql_log(ql_log_fatal, vha, 0x0093,
 		    "Unable to verify integrity of firmware image (%Zd).\n",
 		    blob->fw->size);
-		goto fail_fw_integrity;
+		return QLA_FUNCTION_FAILED;
 	}
 	for (i = 0; i < 4; i++)
 		dcode[i] = be32_to_cpu(fwcode[i + 4]);
@@ -5277,7 +5500,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		ql_log(ql_log_fatal, vha, 0x0095,
 		    "Firmware data: %08x %08x %08x %08x.\n",
 		    dcode[0], dcode[1], dcode[2], dcode[3]);
-		goto fail_fw_integrity;
+		return QLA_FUNCTION_FAILED;
 	}
 
 	while (segments && rval == QLA_SUCCESS) {
@@ -5291,8 +5514,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 			ql_log(ql_log_fatal, vha, 0x0096,
 			    "Unable to verify integrity of firmware image "
 			    "(%Zd).\n", blob->fw->size);
-
-			goto fail_fw_integrity;
+			return QLA_FUNCTION_FAILED;
 		}
 
 		fragment = 0;
@@ -5326,10 +5548,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		/* Next segment. */
 		segments--;
 	}
+
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x171,
+	    "Loading fwdump template from %x\n",
+	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+	risc_size = be32_to_cpu(fwcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x172,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*fwcode);
+	ql_dbg(ql_dbg_init, vha, 0x0173,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0174,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	fwcode += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0175,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0176,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*fwcode)) {
+		ql_log(ql_log_warn, vha, 0x0177,
+		    "Failed fwdump template exceeds array by %x bytes\n",
+		    (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
 	return rval;
 
-fail_fw_integrity:
-	return QLA_FUNCTION_FAILED;
+default_template:
+	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0179,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x017a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*fwcode);
+	fwcode = qla27xx_fwdt_template_default();
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x017b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x017c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+	return rval;
 }
 
 int
@@ -5605,7 +5917,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 	nv->exchange_count = __constant_cpu_to_le16(0);
 	nv->port_name[0] = 0x21;
-	nv->port_name[1] = 0x00 + ha->port_no;
+	nv->port_name[1] = 0x00 + ha->port_no + 1;
 	nv->port_name[2] = 0x00;
 	nv->port_name[3] = 0xe0;
 	nv->port_name[4] = 0x8b;
@@ -5639,7 +5951,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		nv->enode_mac[2] = 0xDD;
 		nv->enode_mac[3] = 0x04;
 		nv->enode_mac[4] = 0x05;
-		nv->enode_mac[5] = 0x06 + ha->port_no;
+		nv->enode_mac[5] = 0x06 + ha->port_no + 1;
 
 		rval = 1;
 	}
@@ -5677,7 +5989,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		icb->enode_mac[2] = 0xDD;
 		icb->enode_mac[3] = 0x04;
 		icb->enode_mac[4] = 0x05;
-		icb->enode_mac[5] = 0x06 + ha->port_no;
+		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
 	}
 
 	/* Use extended-initialization control block. */
@@ -5780,7 +6092,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	ha->login_retry_count = ql2xloginretrycount;
 
 	/* if not running MSI-X we need handshaking on interrupts */
-	if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
 		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
 
 	/* Enable ZIO. */
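
The qla_init.c conversion above gives the TM and abort paths one common shape: allocate an srb, point both sp->done and the IOCB timeout handler at routines that record a completion status and call complete(), start the IOCB, then block in wait_for_completion() until either the response-queue handler or the timer fires. Below is a minimal userspace sketch of that handshake, with POSIX threads standing in for the kernel's struct completion and for the interrupt path; every name in it is illustrative, not driver code.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Stands in for the srb_iocb: status is filled in before complete(). */
struct fake_iocb {
	struct completion comp;
	int comp_status;	/* 0 plays the role of CS_COMPLETE */
};

/* Plays the response-queue handler: record status, wake the waiter. */
static void *fake_isr(void *arg)
{
	struct fake_iocb *iocb = arg;

	iocb->comp_status = 0;
	complete(&iocb->comp);
	return NULL;
}

int main(void)
{
	struct fake_iocb iocb;
	pthread_t isr;

	init_completion(&iocb.comp);
	pthread_create(&isr, NULL, fake_isr, &iocb);
	wait_for_completion(&iocb.comp);	/* issuer blocks, as the driver does */
	printf("comp_status=%d -> %s\n", iocb.comp_status,
	    iocb.comp_status == 0 ? "QLA_SUCCESS" : "QLA_FUNCTION_FAILED");
	pthread_join(isr, NULL);
	return 0;
}

A timeout handler in this model would simply store a nonzero comp_status and call complete() on the same object, which is exactly what qla2x00_tmf_iocb_timeout() and qla24xx_abort_iocb_timeout() do with CS_TIMEOUT.
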
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 46b9307e8be4..e607568bce49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -488,7 +488,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		WRT_REG_DWORD(req->req_q_in, req->ring_index);
 		RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
 	} else if (IS_QLAFX00(ha)) {
@@ -524,7 +524,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24 = NULL;
-	struct mrk_entry_fx00 *mrkfx = NULL;
 
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -541,15 +540,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 	mrk->entry_type = MARKER_TYPE;
 	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		if (IS_QLAFX00(ha)) {
-			mrkfx = (struct mrk_entry_fx00 *) mrk;
-			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
-			mrkfx->handle_hi = 0;
-			mrkfx->tgt_id = cpu_to_le16(loop_id);
-			mrkfx->lun[1] = LSB(lun);
-			mrkfx->lun[2] = MSB(lun);
-			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
-		} else if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			mrk24 = (struct mrk_entry_24xx *) mrk;
 			mrk24->nport_handle = cpu_to_le16(loop_id);
 			mrk24->lun[1] = LSB(lun);
@@ -1823,7 +1814,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 
 	/* Check for room in outstanding command list. */
 	handle = req->current_outstanding_cmd;
-	for (index = 1; req->num_outstanding_cmds; index++) {
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
 		handle++;
 		if (handle == req->num_outstanding_cmds)
 			handle = 1;
@@ -1848,7 +1839,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 skip_cmd_array:
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt) {
-		if (ha->mqenable || IS_QLA83XX(ha))
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2585,29 @@ queuing_error:
 	return QLA_FUNCTION_FAILED;
 }
 
+void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+	struct srb_iocb *aio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+
+	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+	abt_iocb->entry_type = ABORT_IOCB_TYPE;
+	abt_iocb->entry_count = 1;
+	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	abt_iocb->handle_to_abort =
+	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	abt_iocb->vp_index = vha->vp_idx;
+	abt_iocb->req_que_no = cpu_to_le16(req->id);
+	/* Send the command to the firmware */
+	wmb();
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2647,7 +2661,9 @@ qla2x00_start_sp(srb_t *sp)
 		qlafx00_fxdisc_iocb(sp, pkt);
 		break;
 	case SRB_ABT_CMD:
-		qlafx00_abort_iocb(sp, pkt);
+		IS_QLAFX00(ha) ?
+		qlafx00_abort_iocb(sp, pkt) :
+		qla24xx_abort_iocb(sp, pkt);
 		break;
 	default:
 		break;
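
A small but real fix rides along in qla2x00_alloc_iocbs() above: the old loop condition `req->num_outstanding_cmds` was a constant truth value, so a full handle table could spin forever; bounding the scan with `index < req->num_outstanding_cmds` makes a full table fail cleanly. Here is a standalone sketch of the same bounded circular search over a 1-based handle table (the size and names are made up):

#include <stdio.h>

#define NUM_HANDLES 8	/* illustrative table size */

/*
 * Scan at most NUM_HANDLES - 1 slots, starting after "current" and
 * wrapping; handle 0 stays reserved. Returns 0 when the table is full,
 * which is exactly the case the unbounded loop mishandled.
 */
static unsigned int find_free_handle(void *slots[], unsigned int current)
{
	unsigned int handle = current;
	unsigned int index;

	for (index = 1; index < NUM_HANDLES; index++) {
		handle++;
		if (handle == NUM_HANDLES)
			handle = 1;
		if (slots[handle] == NULL)
			return handle;
	}
	return 0;	/* no room */
}

int main(void)
{
	void *slots[NUM_HANDLES] = { 0 };
	int dummy;

	slots[1] = &dummy;	/* one slot busy */
	printf("free handle: %u\n", find_free_handle(slots, 1)); /* -> 2 */
	return 0;
}
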
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0a1dcb43d18b..95314ef2e505 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -356,15 +356,16 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 const char *
 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
 {
-	static const char * const link_speeds[] = {
-	    "1", "2", "?", "4", "8", "16", "10"
+	static const char *const link_speeds[] = {
+	    "1", "2", "?", "4", "8", "16", "32", "10"
 	};
+#define QLA_LAST_SPEED 7
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return link_speeds[0];
 	else if (speed == 0x13)
-		return link_speeds[6];
-	else if (speed < 6)
+		return link_speeds[QLA_LAST_SPEED];
+	else if (speed < QLA_LAST_SPEED)
 		return link_speeds[speed];
 	else
 		return link_speeds[LS_UNKNOWN];
@@ -649,7 +650,7 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
 		    RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -666,7 +667,7 @@ skip_rio:
 			vha->device_flags |= DFLG_DEV_FAILED;
 		} else {
 			/* Check to see if MPI timeout occurred */
-			if ((mbx & MBX_3) && (ha->flags.port0))
+			if ((mbx & MBX_3) && (ha->port_no == 0))
 				set_bit(MPI_RESET_NEEDED,
 				    &vha->dpc_flags);
 
@@ -1497,8 +1498,7 @@ logio_done:
 }
 
 static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
-    struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 {
 	const char func[] = "TMF-IOCB";
 	const char *type;
@@ -1506,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	srb_t *sp;
 	struct srb_iocb *iocb;
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
-	int error = 1;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
 	if (!sp)
@@ -1515,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	iocb = &sp->u.iocb_cmd;
 	type = sp->name;
 	fcport = sp->fcport;
+	iocb->u.tmf.data = QLA_SUCCESS;
 
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
-	} else if (!(le16_to_cpu(sts->scsi_status) &
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if ((le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
-		ql_log(ql_log_warn, fcport->vha, 0x503a,
-		    "Async-%s error - hdl=%x no response info(%x).\n",
-		    type, sp->handle, sts->scsi_status);
-	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
-		ql_log(ql_log_warn, fcport->vha, 0x503b,
-		    "Async-%s error - hdl=%x not enough response(%d).\n",
-		    type, sp->handle, sts->rsp_data_len);
-	} else if (sts->data[3]) {
-		ql_log(ql_log_warn, fcport->vha, 0x503c,
-		    "Async-%s error - hdl=%x response(%x).\n",
-		    type, sp->handle, sts->data[3]);
-	} else {
-		error = 0;
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_log(ql_log_warn, fcport->vha, 0x503b,
+			    "Async-%s error - hdl=%x not enough response(%d).\n",
+			    type, sp->handle, sts->rsp_data_len);
+		} else if (sts->data[3]) {
+			ql_log(ql_log_warn, fcport->vha, 0x503c,
+			    "Async-%s error - hdl=%x response(%x).\n",
+			    type, sp->handle, sts->data[3]);
+			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+		}
 	}
 
-	if (error) {
-		iocb->u.tmf.data = error;
+	if (iocb->u.tmf.data != QLA_SUCCESS)
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
 		    (uint8_t *)sts, sizeof(*sts));
-	}
 
 	sp->done(vha, sp, 0);
 }
@@ -2025,6 +2022,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		return;
 	}
 
+	/* Task Management completion. */
+	if (sp->type == SRB_TM_CMD) {
+		qla24xx_tm_iocb_entry(vha, req, pkt);
+		return;
+	}
+
 	/* Fast path completion. */
 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
 		qla2x00_process_completed_request(vha, req, handle);
@@ -2425,6 +2428,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 	}
 }
 
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct abort_entry_24xx *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+	sp->done(vha, sp, 0);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -2474,10 +2494,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
-		case TSK_MGMT_IOCB_TYPE:
-			qla24xx_tm_iocb_entry(vha, rsp->req,
-			    (struct tsk_mgmt_entry *)pkt);
-			break;
 		case CT_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
 			break;
@@ -2497,6 +2513,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			 * from falling into default case
 			 */
 			break;
+		case ABORT_IOCB_TYPE:
+			qla24xx_abort_iocb_entry(vha, rsp->req,
+			    (struct abort_entry_24xx *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2525,7 +2545,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -2979,7 +3000,7 @@ msix_register_fail:
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA83XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
 			ha->mqenable = 1;
@@ -3003,12 +3024,13 @@ int
 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 	int ret = QLA_FUNCTION_FAILED;
-	device_reg_t __iomem *reg = ha->iobase;
+	device_reg_t *reg = ha->iobase;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 	/* If possible, enable MSI-X. */
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -3043,7 +3065,8 @@ skip_msix:
3043 "Falling back-to MSI mode -%d.\n", ret); 3065 "Falling back-to MSI mode -%d.\n", ret);
3044 3066
3045 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3067 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3046 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha)) 3068 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3069 !IS_QLA27XX(ha))
3047 goto skip_msi; 3070 goto skip_msi;
3048 3071
3049 ret = pci_enable_msi(ha->pdev); 3072 ret = pci_enable_msi(ha->pdev);
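
The qla2x00_get_link_speed_str() hunk above replaces the magic indices 6 and 7 with QLA_LAST_SPEED so the table can grow without silent off-by-one breakage. A standalone sketch of the guarded lookup follows; it assumes, as the driver's table suggests, that the "?" slot doubles as the unknown-speed fallback.

#include <stdio.h>

#define QLA_LAST_SPEED 7

static const char *const link_speeds[] = {
	"1", "2", "?", "4", "8", "16", "32", "10"
};

/*
 * Mirrors the guarded lookup: 0x13 is the 10 Gb/s CNA code that lives
 * past the contiguous range, anything else out of range falls back to
 * the "?" entry (index 2 stands in for LS_UNKNOWN here).
 */
static const char *speed_str(unsigned int speed)
{
	if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	return link_speeds[2];
}

int main(void)
{
	/* prints: 16 10 ? */
	printf("%s %s %s\n", speed_str(5), speed_str(0x13), speed_str(9));
	return 0;
}
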
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b94511ae0051..2528709c4add 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 {
 	int rval;
 	unsigned long flags = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint8_t abort_active;
 	uint8_t io_lock_on;
 	uint16_t command = 0;
@@ -468,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 	mcp->mb[1] = MSW(risc_addr);
 	mcp->mb[2] = LSW(risc_addr);
 	mcp->mb[3] = 0;
-	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha)) {
 		struct nvram_81xx *nv = ha->nvram;
 		mcp->mb[4] = (nv->enhanced_features &
 		    EXTENDED_BB_CREDITS);
@@ -539,6 +540,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+	if (IS_QLA27XX(ha))
+		mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -574,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
574 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 577 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
575 __func__, mcp->mb[17], mcp->mb[16]); 578 __func__, mcp->mb[17], mcp->mb[16]);
576 } 579 }
580 if (IS_QLA27XX(ha)) {
581 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
582 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
583 }
577 584
578failed: 585failed:
579 if (rval != QLA_SUCCESS) { 586 if (rval != QLA_SUCCESS) {
@@ -1225,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
 	}
 	/* 1 and 2 should normally be captured. */
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		/* mb3 is additional info about the installed SFP. */
 		mcp->in_mb |= MBX_3;
 	mcp->buf_size = size;
@@ -2349,7 +2356,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
 		mcp->in_mb |= MBX_12;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -2590,6 +2597,9 @@ qla24xx_abort_command(srb_t *sp)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
 	    "Entered %s.\n", __func__);
 
+	if (ql2xasynctmfenable)
+		return qla24xx_async_abort_command(sp);
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 		if (req->outstanding_cmds[handle] == sp)
@@ -3032,7 +3042,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3032 "Entered %s.\n", __func__); 3042 "Entered %s.\n", __func__);
3033 3043
3034 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3044 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3035 !IS_QLA83XX(vha->hw)) 3045 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3036 return QLA_FUNCTION_FAILED; 3046 return QLA_FUNCTION_FAILED;
3037 3047
3038 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3048 if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3662,7 +3672,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	mcp->mb[12] = req->qos;
 	mcp->mb[11] = req->vp_idx;
 	mcp->mb[13] = req->rid;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = req->id;
@@ -3676,9 +3686,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	mcp->flags = MBX_DMA_OUT;
 	mcp->tov = MBX_TOV_SECONDS * 2;
 
-	if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->in_mb |= MBX_1;
-	if (IS_QLA83XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		mcp->out_mb |= MBX_15;
 		/* debug q create issue in SR-IOV */
 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3687,7 +3697,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(req->options & BIT_0)) {
 		WRT_REG_DWORD(req->req_q_in, 0);
-		if (!IS_QLA83XX(ha))
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 			WRT_REG_DWORD(req->req_q_out, 0);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3725,7 +3735,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	mcp->mb[5] = rsp->length;
 	mcp->mb[14] = rsp->msix->entry;
 	mcp->mb[13] = rsp->rid;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = rsp->id;
@@ -3742,7 +3752,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	if (IS_QLA81XX(ha)) {
 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
-	} else if (IS_QLA83XX(ha)) {
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
 		/* debug q create issue in SR-IOV */
@@ -3809,7 +3819,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3840,7 +3851,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -3874,7 +3886,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4545,7 +4558,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
 	mcp->mb[1] = 0;
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->in_mb |= MBX_3;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -4574,7 +4587,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
 	mcp->out_mb = MBX_0;
@@ -5070,7 +5084,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA83XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5145,7 +5159,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long retry_max_time = jiffies + (2 * HZ);
 
-	if (!IS_QLA83XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
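
Most qla_mbx.c hunks above just widen IS_QLA83XX() checks to include ISP27xx, but the mb[18..21] capture also shows the in_mb convention: each MBX_n bit asks qla2x00_mailbox_command() to copy mailbox register n back, and 32-bit firmware values arrive split across two 16-bit registers. A small sketch of that request-and-reassemble step; the macro names mimic the driver's, the register values are invented.

#include <stdint.h>
#include <stdio.h>

#define MBX_18 (1u << 18)
#define MBX_19 (1u << 19)
#define MBX_20 (1u << 20)
#define MBX_21 (1u << 21)

int main(void)
{
	unsigned int in_mb = 0;
	uint16_t mb[32] = { 0 };
	uint32_t start, end;

	/* Ask for mb18-mb21, as the IS_QLA27XX() branch above does. */
	in_mb |= MBX_21 | MBX_20 | MBX_19 | MBX_18;

	/* Pretend the firmware filled them in (low half, then high). */
	mb[18] = 0x2000; mb[19] = 0x00ff;
	mb[20] = 0x3000; mb[21] = 0x00ff;

	/* Reassemble the two 32-bit values the way the driver does. */
	start = ((uint32_t)mb[19] << 16) | mb[18];
	end = ((uint32_t)mb[21] << 16) | mb[20];
	printf("shared ram: %08x..%08x (requested mask %08x)\n",
	    start, end, in_mb);
	return 0;
}
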
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a72df701fb38..f0a852257f99 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	struct req_que *req = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint32_t cnt;
 
 	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -754,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	struct rsp_que *rsp = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 
 	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
 	if (rsp == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ba6f8b139c98..0aaf6a9c87d3 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -40,7 +40,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
 {
 	int rval;
 	unsigned long flags = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint8_t abort_active;
 	uint8_t io_lock_on;
 	uint16_t command = 0;
@@ -631,20 +631,6 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
-	struct init_cb_fx *icb;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
-
-	/* Setup ring parameters in initialization control block. */
-	icb = (struct init_cb_fx *)ha->init_cb;
-	icb->request_q_outpointer = __constant_cpu_to_le16(0);
-	icb->response_q_inpointer = __constant_cpu_to_le16(0);
-	icb->request_q_length = cpu_to_le16(req->length);
-	icb->response_q_length = cpu_to_le16(rsp->length);
-	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
 	WRT_REG_DWORD(&reg->req_q_in, 0);
 	WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -699,78 +685,16 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)
699 spin_unlock_irqrestore(&ha->hardware_lock, flags); 685 spin_unlock_irqrestore(&ha->hardware_lock, flags);
700} 686}
701 687
702static void
703qlafx00_tmf_iocb_timeout(void *data)
704{
705 srb_t *sp = (srb_t *)data;
706 struct srb_iocb *tmf = &sp->u.iocb_cmd;
707
708 tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
709 complete(&tmf->u.tmf.comp);
710}
711
712static void
713qlafx00_tmf_sp_done(void *data, void *ptr, int res)
714{
715 srb_t *sp = (srb_t *)ptr;
716 struct srb_iocb *tmf = &sp->u.iocb_cmd;
717
718 complete(&tmf->u.tmf.comp);
719}
720
721static int
722qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
723 uint32_t lun, uint32_t tag)
724{
725 scsi_qla_host_t *vha = fcport->vha;
726 struct srb_iocb *tm_iocb;
727 srb_t *sp;
728 int rval = QLA_FUNCTION_FAILED;
729
730 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
731 if (!sp)
732 goto done;
733
734 tm_iocb = &sp->u.iocb_cmd;
735 sp->type = SRB_TM_CMD;
736 sp->name = "tmf";
737 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
738 tm_iocb->u.tmf.flags = flags;
739 tm_iocb->u.tmf.lun = lun;
740 tm_iocb->u.tmf.data = tag;
741 sp->done = qlafx00_tmf_sp_done;
742 tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
743 init_completion(&tm_iocb->u.tmf.comp);
744
745 rval = qla2x00_start_sp(sp);
746 if (rval != QLA_SUCCESS)
747 goto done_free_sp;
748
749 ql_dbg(ql_dbg_async, vha, 0x507b,
750 "Task management command issued target_id=%x\n",
751 fcport->tgt_id);
752
753 wait_for_completion(&tm_iocb->u.tmf.comp);
754
755 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
756 QLA_SUCCESS : QLA_FUNCTION_FAILED;
757
758done_free_sp:
759 sp->free(vha, sp);
760done:
761 return rval;
762}
763
764int 688int
765qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag) 689qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
766{ 690{
767 return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 691 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
768} 692}
769 693
770int 694int
771qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag) 695qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
772{ 696{
773 return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 697 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
774} 698}
775 699
776int 700int
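The deleted qlafx00_async_tm_cmd() duplicated the generic issue-and-wait shape that the common qla2x00_async_tm_cmd() already provides, as the new qlafx00_abort_target()/qlafx00_lun_reset() bodies show: queue an SRB with a done callback and a timeout handler, then sleep on a completion that whichever path fires first will signal. A userspace model of that handshake, with pthreads standing in for kernel completions; all names here are illustrative.

#include <pthread.h>
#include <stdio.h>

#define CS_COMPLETE 0
#define CS_TIMEOUT  6

struct tmf_wait {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
    int comp_status;
};

/* Called by either the response path or the timeout handler, whichever
 * fires first, mirroring sp->done vs. iocb->timeout in the driver. */
static void tmf_complete(struct tmf_wait *w, int status)
{
    pthread_mutex_lock(&w->lock);
    if (!w->done) {
        w->done = 1;
        w->comp_status = status;
        pthread_cond_signal(&w->cond);
    }
    pthread_mutex_unlock(&w->lock);
}

static void *firmware_thread(void *arg)
{
    tmf_complete(arg, CS_COMPLETE);   /* pretend the TMF completed */
    return NULL;
}

int main(void)
{
    struct tmf_wait w = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
    };
    pthread_t t;

    pthread_create(&t, NULL, firmware_thread, &w);
    pthread_mutex_lock(&w.lock);
    while (!w.done)                   /* wait_for_completion() */
        pthread_cond_wait(&w.cond, &w.lock);
    pthread_mutex_unlock(&w.lock);
    pthread_join(t, NULL);

    printf("rval = %s\n", w.comp_status == CS_COMPLETE ?
        "QLA_SUCCESS" : "QLA_FUNCTION_FAILED");
    return 0;
}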
@@ -997,6 +921,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
997 break; 921 break;
998 922
999 default: 923 default:
924 if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
925 break;
926
1000 /* Fw is apparently not ready; in order to continue, 927 /* Fw is apparently not ready; in order to continue,
1001 * we might need to issue Mbox cmd, but the problem is 928 * we might need to issue Mbox cmd, but the problem is
1002 * that the DoorBell vector values that come with the 929 * that the DoorBell vector values that come with the
@@ -2014,7 +1941,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
2014 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); 1941 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
2015 } else if (fx_type == FXDISC_ABORT_IOCTL) 1942 } else if (fx_type == FXDISC_ABORT_IOCTL)
2016 fdisc->u.fxiocb.result = 1943 fdisc->u.fxiocb.result =
2017 (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ? 1944 (fdisc->u.fxiocb.result ==
1945 cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
2018 cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); 1946 cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
2019 1947
2020 rval = le32_to_cpu(fdisc->u.fxiocb.result); 1948 rval = le32_to_cpu(fdisc->u.fxiocb.result);
@@ -2034,94 +1962,6 @@ done:
2034 return rval; 1962 return rval;
2035} 1963}
2036 1964
2037static void
2038qlafx00_abort_iocb_timeout(void *data)
2039{
2040 srb_t *sp = (srb_t *)data;
2041 struct srb_iocb *abt = &sp->u.iocb_cmd;
2042
2043 abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
2044 complete(&abt->u.abt.comp);
2045}
2046
2047static void
2048qlafx00_abort_sp_done(void *data, void *ptr, int res)
2049{
2050 srb_t *sp = (srb_t *)ptr;
2051 struct srb_iocb *abt = &sp->u.iocb_cmd;
2052
2053 complete(&abt->u.abt.comp);
2054}
2055
2056static int
2057qlafx00_async_abt_cmd(srb_t *cmd_sp)
2058{
2059 scsi_qla_host_t *vha = cmd_sp->fcport->vha;
2060 fc_port_t *fcport = cmd_sp->fcport;
2061 struct srb_iocb *abt_iocb;
2062 srb_t *sp;
2063 int rval = QLA_FUNCTION_FAILED;
2064
2065 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2066 if (!sp)
2067 goto done;
2068
2069 abt_iocb = &sp->u.iocb_cmd;
2070 sp->type = SRB_ABT_CMD;
2071 sp->name = "abort";
2072 qla2x00_init_timer(sp, FXDISC_TIMEOUT);
2073 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
2074 sp->done = qlafx00_abort_sp_done;
2075 abt_iocb->timeout = qlafx00_abort_iocb_timeout;
2076 init_completion(&abt_iocb->u.abt.comp);
2077
2078 rval = qla2x00_start_sp(sp);
2079 if (rval != QLA_SUCCESS)
2080 goto done_free_sp;
2081
2082 ql_dbg(ql_dbg_async, vha, 0x507c,
2083 "Abort command issued - hdl=%x, target_id=%x\n",
2084 cmd_sp->handle, fcport->tgt_id);
2085
2086 wait_for_completion(&abt_iocb->u.abt.comp);
2087
2088 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
2089 QLA_SUCCESS : QLA_FUNCTION_FAILED;
2090
2091done_free_sp:
2092 sp->free(vha, sp);
2093done:
2094 return rval;
2095}
2096
2097int
2098qlafx00_abort_command(srb_t *sp)
2099{
2100 unsigned long flags = 0;
2101
2102 uint32_t handle;
2103 fc_port_t *fcport = sp->fcport;
2104 struct scsi_qla_host *vha = fcport->vha;
2105 struct qla_hw_data *ha = vha->hw;
2106 struct req_que *req = vha->req;
2107
2108 spin_lock_irqsave(&ha->hardware_lock, flags);
2109 for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
2110 if (req->outstanding_cmds[handle] == sp)
2111 break;
2112 }
2113 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2114 if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
2115 /* Command not found. */
2116 return QLA_FUNCTION_FAILED;
2117 }
2118 if (sp->type == SRB_FXIOCB_DCMD)
2119 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2120 FXDISC_ABORT_IOCTL);
2121
2122 return qlafx00_async_abt_cmd(sp);
2123}
2124
2125/* 1965/*
2126 * qlafx00_initialize_adapter 1966 * qlafx00_initialize_adapter
2127 * Initialize board. 1967 * Initialize board.
@@ -2150,7 +1990,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2150 vha->device_flags = DFLG_NO_CABLE; 1990 vha->device_flags = DFLG_NO_CABLE;
2151 vha->dpc_flags = 0; 1991 vha->dpc_flags = 0;
2152 vha->flags.management_server_logged_in = 0; 1992 vha->flags.management_server_logged_in = 0;
2153 vha->marker_needed = 0;
2154 ha->isp_abort_cnt = 0; 1993 ha->isp_abort_cnt = 0;
2155 ha->beacon_blink_led = 0; 1994 ha->beacon_blink_led = 0;
2156 1995
@@ -2354,8 +2193,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2354 fstatus.ioctl_flags = pkt->fw_iotcl_flags; 2193 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2355 fstatus.ioctl_data = pkt->dataword_r; 2194 fstatus.ioctl_data = pkt->dataword_r;
2356 fstatus.adapid = pkt->adapid; 2195 fstatus.adapid = pkt->adapid;
2357 fstatus.adapid_hi = pkt->adapid_hi; 2196 fstatus.reserved_2 = pkt->dataword_r_extra;
2358 fstatus.reserved_2 = pkt->reserved_1;
2359 fstatus.res_count = pkt->residuallen; 2197 fstatus.res_count = pkt->residuallen;
2360 fstatus.status = pkt->status; 2198 fstatus.status = pkt->status;
2361 fstatus.seq_number = pkt->seq_no; 2199 fstatus.seq_number = pkt->seq_no;
@@ -2804,7 +2642,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2804 srb_t *sp; 2642 srb_t *sp;
2805 struct qla_hw_data *ha = vha->hw; 2643 struct qla_hw_data *ha = vha->hw;
2806 const char func[] = "ERROR-IOCB"; 2644 const char func[] = "ERROR-IOCB";
2807 uint16_t que = MSW(pkt->handle); 2645 uint16_t que = 0;
2808 struct req_que *req = NULL; 2646 struct req_que *req = NULL;
2809 int res = DID_ERROR << 16; 2647 int res = DID_ERROR << 16;
2810 2648
@@ -2833,16 +2671,22 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2833{ 2671{
2834 struct sts_entry_fx00 *pkt; 2672 struct sts_entry_fx00 *pkt;
2835 response_t *lptr; 2673 response_t *lptr;
2674 uint16_t lreq_q_in = 0;
2675 uint16_t lreq_q_out = 0;
2836 2676
2837 while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) != 2677 lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
2838 RESPONSE_PROCESSED) { 2678 lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
2679
2680 while (lreq_q_in != lreq_q_out) {
2839 lptr = rsp->ring_ptr; 2681 lptr = rsp->ring_ptr;
2840 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr, 2682 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2841 sizeof(rsp->rsp_pkt)); 2683 sizeof(rsp->rsp_pkt));
2842 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt; 2684 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2843 2685
2844 rsp->ring_index++; 2686 rsp->ring_index++;
2687 lreq_q_out++;
2845 if (rsp->ring_index == rsp->length) { 2688 if (rsp->ring_index == rsp->length) {
2689 lreq_q_out = 0;
2846 rsp->ring_index = 0; 2690 rsp->ring_index = 0;
2847 rsp->ring_ptr = rsp->ring; 2691 rsp->ring_ptr = rsp->ring;
2848 } else { 2692 } else {
@@ -2854,7 +2698,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2854 qlafx00_error_entry(vha, rsp, 2698 qlafx00_error_entry(vha, rsp,
2855 (struct sts_entry_fx00 *)pkt, pkt->entry_status, 2699 (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2856 pkt->entry_type); 2700 pkt->entry_type);
2857 goto next_iter;
2858 continue; 2701 continue;
2859 } 2702 }
2860 2703
@@ -2888,10 +2731,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2888 pkt->entry_type, pkt->entry_status); 2731 pkt->entry_type, pkt->entry_status);
2889 break; 2732 break;
2890 } 2733 }
2891next_iter:
2892 WRT_REG_DWORD((void __iomem *)&lptr->signature,
2893 RESPONSE_PROCESSED);
2894 wmb();
2895 } 2734 }
2896 2735
2897 /* Adjust ring index */ 2736 /* Adjust ring index */
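The rework above replaces per-entry signature words with the hardware's producer/consumer indices: the loop now runs while the local out-pointer trails the firmware's in-pointer, wrapping both at the ring length, and no longer writes RESPONSE_PROCESSED back into each entry. A small standalone model of that consumer loop; names and sizes are illustrative.

#include <stdio.h>

#define RING_LEN 8

/* Consume entries between the firmware's producer index (q_in) and our
 * consumer index (q_out), wrapping at the ring length, the same shape as
 * the reworked qlafx00_process_response_queue() loop. */
static void process_ring(const int *ring, unsigned q_in, unsigned *q_out)
{
    while (*q_out != q_in) {
        printf("entry[%u] = %d\n", *q_out, ring[*q_out]);
        if (++*q_out == RING_LEN)     /* wrap, like the ring_index reset */
            *q_out = 0;
    }
}

int main(void)
{
    int ring[RING_LEN] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    unsigned q_out = 6;

    process_ring(ring, 2, &q_out);    /* consumes entries 6, 7, 0, 1 */
    return 0;
}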
@@ -2926,9 +2765,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
2926 break; 2765 break;
2927 2766
2928 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ 2767 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
2929 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); 2768 ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
2930 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); 2769 ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
2931 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3); 2770 ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
2932 ql_dbg(ql_dbg_async, vha, 0x5077, 2771 ql_dbg(ql_dbg_async, vha, 0x5077,
2933 "Asynchronous port Update received " 2772 "Asynchronous port Update received "
2934 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", 2773 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2985,7 +2824,7 @@ static void
2985qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) 2824qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2986{ 2825{
2987 uint16_t cnt; 2826 uint16_t cnt;
2988 uint16_t __iomem *wptr; 2827 uint32_t __iomem *wptr;
2989 struct qla_hw_data *ha = vha->hw; 2828 struct qla_hw_data *ha = vha->hw;
2990 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 2829 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2991 2830
@@ -2995,10 +2834,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2995 /* Load return mailbox registers. */ 2834 /* Load return mailbox registers. */
2996 ha->flags.mbox_int = 1; 2835 ha->flags.mbox_int = 1;
2997 ha->mailbox_out32[0] = mb0; 2836 ha->mailbox_out32[0] = mb0;
2998 wptr = (uint16_t __iomem *)&reg->mailbox17; 2837 wptr = (uint32_t __iomem *)&reg->mailbox17;
2999 2838
3000 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2839 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3001 ha->mailbox_out32[cnt] = RD_REG_WORD(wptr); 2840 ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
3002 wptr++; 2841 wptr++;
3003 } 2842 }
3004} 2843}
@@ -3025,6 +2864,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
3025 struct rsp_que *rsp; 2864 struct rsp_que *rsp;
3026 unsigned long flags; 2865 unsigned long flags;
3027 uint32_t clr_intr = 0; 2866 uint32_t clr_intr = 0;
2867 uint32_t intr_stat = 0;
3028 2868
3029 rsp = (struct rsp_que *) dev_id; 2869 rsp = (struct rsp_que *) dev_id;
3030 if (!rsp) { 2870 if (!rsp) {
@@ -3046,34 +2886,26 @@ qlafx00_intr_handler(int irq, void *dev_id)
3046 stat = QLAFX00_RD_INTR_REG(ha); 2886 stat = QLAFX00_RD_INTR_REG(ha);
3047 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2887 if (qla2x00_check_reg_for_disconnect(vha, stat))
3048 break; 2888 break;
3049 if ((stat & QLAFX00_HST_INT_STS_BITS) == 0) 2889 intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
2890 if (!intr_stat)
3050 break; 2891 break;
3051 2892
3052 switch (stat & QLAFX00_HST_INT_STS_BITS) { 2893 if (stat & QLAFX00_INTR_MB_CMPLT) {
3053 case QLAFX00_INTR_MB_CMPLT:
3054 case QLAFX00_INTR_MB_RSP_CMPLT:
3055 case QLAFX00_INTR_MB_ASYNC_CMPLT:
3056 case QLAFX00_INTR_ALL_CMPLT:
3057 mb[0] = RD_REG_WORD(&reg->mailbox16); 2894 mb[0] = RD_REG_WORD(&reg->mailbox16);
3058 qlafx00_mbx_completion(vha, mb[0]); 2895 qlafx00_mbx_completion(vha, mb[0]);
3059 status |= MBX_INTERRUPT; 2896 status |= MBX_INTERRUPT;
3060 clr_intr |= QLAFX00_INTR_MB_CMPLT; 2897 clr_intr |= QLAFX00_INTR_MB_CMPLT;
3061 break; 2898 }
3062 case QLAFX00_INTR_ASYNC_CMPLT: 2899 if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
3063 case QLAFX00_INTR_RSP_ASYNC_CMPLT:
3064 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0); 2900 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
3065 qlafx00_async_event(vha); 2901 qlafx00_async_event(vha);
3066 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; 2902 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
3067 break; 2903 }
3068 case QLAFX00_INTR_RSP_CMPLT: 2904 if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
3069 qlafx00_process_response_queue(vha, rsp); 2905 qlafx00_process_response_queue(vha, rsp);
3070 clr_intr |= QLAFX00_INTR_RSP_CMPLT; 2906 clr_intr |= QLAFX00_INTR_RSP_CMPLT;
3071 break;
3072 default:
3073 ql_dbg(ql_dbg_async, vha, 0x507a,
3074 "Unrecognized interrupt type (%d).\n", stat);
3075 break;
3076 } 2907 }
2908
3077 QLAFX00_CLR_INTR_REG(ha, clr_intr); 2909 QLAFX00_CLR_INTR_REG(ha, clr_intr);
3078 QLAFX00_RD_INTR_REG(ha); 2910 QLAFX00_RD_INTR_REG(ha);
3079 } 2911 }
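Because the three completion sources can assert in any combination, the old switch over the combined status needed a case label per combination (0x3, 0x5, 0x6, 0x7); testing each bit independently handles any mix and naturally accumulates the bits to clear. A standalone sketch, with constants abbreviated from the QLAFX00_INTR_* set.

#include <stdio.h>

#define INTR_MB_CMPLT    0x1
#define INTR_RSP_CMPLT   0x2
#define INTR_ASYNC_CMPLT 0x4
#define INTR_STS_BITS    (INTR_MB_CMPLT | INTR_RSP_CMPLT | INTR_ASYNC_CMPLT)

/* Service every source present in stat and return the bits handled, the
 * value the real handler passes to QLAFX00_CLR_INTR_REG(). */
static unsigned handle_status(unsigned stat)
{
    unsigned clr = 0;

    stat &= INTR_STS_BITS;
    if (stat & INTR_MB_CMPLT)         /* mailbox command completed */
        clr |= INTR_MB_CMPLT;
    if (stat & INTR_ASYNC_CMPLT)      /* asynchronous event posted */
        clr |= INTR_ASYNC_CMPLT;
    if (stat & INTR_RSP_CMPLT)        /* response ring has entries */
        clr |= INTR_RSP_CMPLT;
    return clr;
}

int main(void)
{
    /* 0x7 combines all three; a switch would need a dedicated case. */
    printf("serviced 0x%x\n", handle_status(0x7));
    return 0;
}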
@@ -3223,17 +3055,6 @@ qlafx00_start_scsi(srb_t *sp)
3223 /* So we know we haven't pci_map'ed anything yet */ 3055 /* So we know we haven't pci_map'ed anything yet */
3224 tot_dsds = 0; 3056 tot_dsds = 0;
3225 3057
3226 /* Forcing marker needed for now */
3227 vha->marker_needed = 0;
3228
3229 /* Send marker if required */
3230 if (vha->marker_needed != 0) {
3231 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
3232 QLA_SUCCESS)
3233 return QLA_FUNCTION_FAILED;
3234 vha->marker_needed = 0;
3235 }
3236
3237 /* Acquire ring specific lock */ 3058 /* Acquire ring specific lock */
3238 spin_lock_irqsave(&ha->hardware_lock, flags); 3059 spin_lock_irqsave(&ha->hardware_lock, flags);
3239 3060
@@ -3284,7 +3105,9 @@ qlafx00_start_scsi(srb_t *sp)
3284 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); 3105 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
3285 3106
3286 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); 3107 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
3287 lcmd_pkt.handle_hi = 0; 3108 lcmd_pkt.reserved_0 = 0;
3109 lcmd_pkt.port_path_ctrl = 0;
3110 lcmd_pkt.reserved_1 = 0;
3288 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); 3111 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
3289 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); 3112 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
3290 3113
@@ -3364,8 +3187,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3364 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; 3187 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3365 tm_iocb.entry_count = 1; 3188 tm_iocb.entry_count = 1;
3366 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3189 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3367 tm_iocb.handle_hi = 0; 3190 tm_iocb.reserved_0 = 0;
3368 tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3369 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); 3191 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3370 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); 3192 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
3371 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { 3193 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 6cd7072cc0ff..e529dfaeb854 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -22,13 +22,16 @@ struct cmd_type_7_fx00 {
22 uint8_t entry_status; /* Entry Status. */ 22 uint8_t entry_status; /* Entry Status. */
23 23
24 uint32_t handle; /* System handle. */ 24 uint32_t handle; /* System handle. */
25 uint32_t handle_hi; 25 uint8_t reserved_0;
26 uint8_t port_path_ctrl;
27 uint16_t reserved_1;
26 28
27 __le16 tgt_idx; /* Target Idx. */ 29 __le16 tgt_idx; /* Target Idx. */
28 uint16_t timeout; /* Command timeout. */ 30 uint16_t timeout; /* Command timeout. */
29 31
30 __le16 dseg_count; /* Data segment count. */ 32 __le16 dseg_count; /* Data segment count. */
31 uint16_t scsi_rsp_dsd_len; 33 uint8_t scsi_rsp_dsd_len;
34 uint8_t reserved_2;
32 35
33 struct scsi_lun lun; /* LUN (LE). */ 36 struct scsi_lun lun; /* LUN (LE). */
34 37
@@ -47,30 +50,6 @@ struct cmd_type_7_fx00 {
47 uint32_t dseg_0_len; /* Data segment 0 length. */ 50 uint32_t dseg_0_len; /* Data segment 0 length. */
48}; 51};
49 52
50/*
51 * ISP queue - marker entry structure definition.
52 */
53struct mrk_entry_fx00 {
54 uint8_t entry_type; /* Entry type. */
55 uint8_t entry_count; /* Entry count. */
56 uint8_t handle_count; /* Handle count. */
57 uint8_t entry_status; /* Entry Status. */
58
59 uint32_t handle; /* System handle. */
60 uint32_t handle_hi; /* System handle. */
61
62 uint16_t tgt_id; /* Target ID. */
63
64 uint8_t modifier; /* Modifier (7-0). */
65 uint8_t reserved_1;
66
67 uint8_t reserved_2[5];
68
69 uint8_t lun[8]; /* FCP LUN (BE). */
70 uint8_t reserved_3[36];
71};
72
73
74#define STATUS_TYPE_FX00 0x01 /* Status entry. */ 53#define STATUS_TYPE_FX00 0x01 /* Status entry. */
75struct sts_entry_fx00 { 54struct sts_entry_fx00 {
76 uint8_t entry_type; /* Entry type. */ 55 uint8_t entry_type; /* Entry type. */
@@ -79,7 +58,7 @@ struct sts_entry_fx00 {
79 uint8_t entry_status; /* Entry Status. */ 58 uint8_t entry_status; /* Entry Status. */
80 59
81 uint32_t handle; /* System handle. */ 60 uint32_t handle; /* System handle. */
82 uint32_t handle_hi; /* System handle. */ 61 uint32_t reserved_3; /* System handle. */
83 62
84 __le16 comp_status; /* Completion status. */ 63 __le16 comp_status; /* Completion status. */
85 uint16_t reserved_0; /* OX_ID used by the firmware. */ 64 uint16_t reserved_0; /* OX_ID used by the firmware. */
@@ -102,7 +81,7 @@ struct sts_entry_fx00 {
102 81
103struct multi_sts_entry_fx00 { 82struct multi_sts_entry_fx00 {
104 uint8_t entry_type; /* Entry type. */ 83 uint8_t entry_type; /* Entry type. */
105 uint8_t sys_define; /* System defined. */ 84 uint8_t entry_count; /* Entry count. */
106 uint8_t handle_count; 85 uint8_t handle_count;
107 uint8_t entry_status; 86 uint8_t entry_status;
108 87
@@ -118,15 +97,13 @@ struct tsk_mgmt_entry_fx00 {
118 97
119 __le32 handle; /* System handle. */ 98 __le32 handle; /* System handle. */
120 99
121 uint32_t handle_hi; /* System handle. */ 100 uint32_t reserved_0;
122 101
123 __le16 tgt_id; /* Target Idx. */ 102 __le16 tgt_id; /* Target Idx. */
124 103
125 uint16_t reserved_1; 104 uint16_t reserved_1;
126 105 uint16_t reserved_3;
127 uint16_t delay; /* Activity delay in seconds. */ 106 uint16_t reserved_4;
128
129 __le16 timeout; /* Command timeout. */
130 107
131 struct scsi_lun lun; /* LUN (LE). */ 108 struct scsi_lun lun; /* LUN (LE). */
132 109
@@ -144,13 +121,13 @@ struct abort_iocb_entry_fx00 {
144 uint8_t entry_status; /* Entry Status. */ 121 uint8_t entry_status; /* Entry Status. */
145 122
146 __le32 handle; /* System handle. */ 123 __le32 handle; /* System handle. */
147 __le32 handle_hi; /* System handle. */ 124 __le32 reserved_0;
148 125
149 __le16 tgt_id_sts; /* Completion status. */ 126 __le16 tgt_id_sts; /* Completion status. */
150 __le16 options; 127 __le16 options;
151 128
152 __le32 abort_handle; /* System handle. */ 129 __le32 abort_handle; /* System handle. */
153 __le32 abort_handle_hi; /* System handle. */ 130 __le32 reserved_2;
154 131
155 __le16 req_que_no; 132 __le16 req_que_no;
156 uint8_t reserved_1[38]; 133 uint8_t reserved_1[38];
@@ -171,8 +148,7 @@ struct ioctl_iocb_entry_fx00 {
171 148
172 __le32 dataword_r; /* Data word returned */ 149 __le32 dataword_r; /* Data word returned */
173 uint32_t adapid; /* Adapter ID */ 150 uint32_t adapid; /* Adapter ID */
174 uint32_t adapid_hi; /* Adapter ID high */ 151 uint32_t dataword_r_extra;
175 uint32_t reserved_1;
176 152
177 __le32 seq_no; 153 __le32 seq_no;
178 uint8_t reserved_2[20]; 154 uint8_t reserved_2[20];
@@ -360,11 +336,7 @@ struct config_info_data {
360 336
361#define QLAFX00_INTR_MB_CMPLT 0x1 337#define QLAFX00_INTR_MB_CMPLT 0x1
362#define QLAFX00_INTR_RSP_CMPLT 0x2 338#define QLAFX00_INTR_RSP_CMPLT 0x2
363#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
364#define QLAFX00_INTR_ASYNC_CMPLT 0x4 339#define QLAFX00_INTR_ASYNC_CMPLT 0x4
365#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
366#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
367#define QLAFX00_INTR_ALL_CMPLT 0x7
368 340
369#define QLAFX00_MBA_SYSTEM_ERR 0x8002 341#define QLAFX00_MBA_SYSTEM_ERR 0x8002
370#define QLAFX00_MBA_TEMP_OVER 0x8005 342#define QLAFX00_MBA_TEMP_OVER 0x8005
@@ -548,4 +520,7 @@ struct mr_data_fx00 {
548/* Max concurrent IOs that can be queued */ 520/* Max concurrent IOs that can be queued */
549#define QLAFX00_MAX_CANQUEUE 1024 521#define QLAFX00_MAX_CANQUEUE 1024
550 522
523/* IOCTL IOCB abort success */
524#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
525
551#endif 526#endif
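The qla_mr.h edits above rename dead fields to reserved_* and split wider ones (a u32 into u8/u8/u16, for example), which is only safe because every offset and the overall IOCB size stay fixed: the firmware owns this wire format. A hypothetical reduced layout showing the kind of compile-time size check that guards such edits; demo_entry is not a driver structure.

#include <stdint.h>

struct demo_entry {
    uint8_t  entry_type;
    uint8_t  entry_count;
    uint8_t  handle_count;
    uint8_t  entry_status;
    uint32_t handle;
    uint8_t  reserved_0;      /* was part of a wider handle_hi field */
    uint8_t  port_path_ctrl;
    uint16_t reserved_1;
    uint8_t  payload[52];     /* remainder of the 64-byte IOCB */
};

/* Catches any accidental resize or re-padding at compile time. */
_Static_assert(sizeof(struct demo_entry) == 64, "IOCB must stay 64 bytes");

int main(void) { return 0; }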
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1e6ba4a369e2..5511e24b1f11 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1664,10 +1664,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1664 /* Mapping of IO base pointer */ 1664 /* Mapping of IO base pointer */
1665 if (IS_QLA8044(ha)) { 1665 if (IS_QLA8044(ha)) {
1666 ha->iobase = 1666 ha->iobase =
1667 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase); 1667 (device_reg_t *)((uint8_t *)ha->nx_pcibase);
1668 } else if (IS_QLA82XX(ha)) { 1668 } else if (IS_QLA82XX(ha)) {
1669 ha->iobase = 1669 ha->iobase =
1670 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 1670 (device_reg_t *)((uint8_t *)ha->nx_pcibase +
1671 0xbc000 + (ha->pdev->devfn << 11)); 1671 0xbc000 + (ha->pdev->devfn << 11));
1672 } 1672 }
1673 1673
@@ -4502,3 +4502,20 @@ exit:
4502 qla82xx_idc_unlock(ha); 4502 qla82xx_idc_unlock(ha);
4503 return rval; 4503 return rval;
4504} 4504}
4505
4506void
4507qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
4508{
4509 struct qla_hw_data *ha = vha->hw;
4510
4511 if (!ha->allow_cna_fw_dump)
4512 return;
4513
4514 scsi_block_requests(vha->host);
4515 ha->flags.isp82xx_no_md_cap = 1;
4516 qla82xx_idc_lock(ha);
4517 qla82xx_set_reset_owner(vha);
4518 qla82xx_idc_unlock(ha);
4519 qla2x00_wait_for_chip_reset(vha);
4520 scsi_unblock_requests(vha->host);
4521}
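The new qla82xx_fw_dump() (and its qla8044 twin below) performs a fixed quiesce sequence before the dump can be collected. A linearized model of that ordering, with every function a stub; only the sequence itself is taken from the hunk.

#include <stdio.h>

static void scsi_block_requests(void)   { puts("1. block new requests"); }
static void idc_lock(void)              { puts("2. take IDC lock"); }
static void set_reset_owner(void)       { puts("3. claim reset ownership"); }
static void idc_unlock(void)            { puts("4. drop IDC lock"); }
static void wait_for_chip_reset(void)   { puts("5. wait for chip reset"); }
static void scsi_unblock_requests(void) { puts("6. unblock requests"); }

int main(void)
{
    int allow_cna_fw_dump = 1;

    if (!allow_cna_fw_dump)
        return 0;                     /* opt-in guard, as in the driver */
    scsi_block_requests();            /* isp82xx_no_md_cap is also set here */
    idc_lock();
    set_reset_owner();
    idc_unlock();
    wait_for_chip_reset();
    scsi_unblock_requests();
    return 0;
}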
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index f60989d729a8..86cf10815db0 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1578,8 +1578,8 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
1578 do { 1578 do {
1579 if (time_after_eq(jiffies, dev_init_timeout)) { 1579 if (time_after_eq(jiffies, dev_init_timeout)) {
1580 ql_log(ql_log_info, vha, 0xb0c4, 1580 ql_log(ql_log_info, vha, 0xb0c4,
1581 "%s: Non Reset owner DEV INIT " 1581 "%s: Non Reset owner: Reset Ack Timeout!\n",
1582 "TIMEOUT!\n", __func__); 1582 __func__);
1583 break; 1583 break;
1584 } 1584 }
1585 1585
@@ -2014,8 +2014,6 @@ qla8044_watchdog(struct scsi_qla_host *vha)
2014 2014
2015 /* don't poll if reset is going on or FW hang in quiescent state */ 2015 /* don't poll if reset is going on or FW hang in quiescent state */
2016 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 2016 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2017 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2018 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
2019 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { 2017 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2020 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); 2018 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2021 2019
@@ -3715,3 +3713,19 @@ exit_isp_reset:
3715 return rval; 3713 return rval;
3716} 3714}
3717 3715
3716void
3717qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
3718{
3719 struct qla_hw_data *ha = vha->hw;
3720
3721 if (!ha->allow_cna_fw_dump)
3722 return;
3723
3724 scsi_block_requests(vha->host);
3725 ha->flags.isp82xx_no_md_cap = 1;
3726 qla8044_idc_lock(ha);
3727 qla82xx_set_reset_owner(vha);
3728 qla8044_idc_unlock(ha);
3729 qla2x00_wait_for_chip_reset(vha);
3730 scsi_unblock_requests(vha->host);
3731}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 89a53002b585..19e99cc33724 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
120int ql2xenabledif = 2; 120int ql2xenabledif = 2;
121module_param(ql2xenabledif, int, S_IRUGO); 121module_param(ql2xenabledif, int, S_IRUGO);
122MODULE_PARM_DESC(ql2xenabledif, 122MODULE_PARM_DESC(ql2xenabledif,
123 " Enable T10-CRC-DIF " 123 " Enable T10-CRC-DIF:\n"
124 " Default is 0 - No DIF Support. 1 - Enable it" 124 " Default is 2.\n"
125 ", 2 - Enable DIF for all types, except Type 0."); 125 " 0 -- No DIF Support\n"
126 " 1 -- Enable DIF for all types\n"
127 " 2 -- Enable DIF for all types, except Type 0.\n");
126 128
127int ql2xenablehba_err_chk = 2; 129int ql2xenablehba_err_chk = 2;
128module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 130module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
129MODULE_PARM_DESC(ql2xenablehba_err_chk, 131MODULE_PARM_DESC(ql2xenablehba_err_chk,
130 " Enable T10-CRC-DIF Error isolation by HBA:\n" 132 " Enable T10-CRC-DIF Error isolation by HBA:\n"
131 " Default is 1.\n" 133 " Default is 2.\n"
132 " 0 -- Error isolation disabled\n" 134 " 0 -- Error isolation disabled\n"
133 " 1 -- Error isolation enabled only for DIX Type 0\n" 135 " 1 -- Error isolation enabled only for DIX Type 0\n"
134 " 2 -- Error isolation enabled for all Types\n"); 136 " 2 -- Error isolation enabled for all Types\n");
@@ -1975,7 +1977,7 @@ static struct isp_operations qla82xx_isp_ops = {
1975 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1977 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1976 .read_nvram = qla24xx_read_nvram_data, 1978 .read_nvram = qla24xx_read_nvram_data,
1977 .write_nvram = qla24xx_write_nvram_data, 1979 .write_nvram = qla24xx_write_nvram_data,
1978 .fw_dump = qla24xx_fw_dump, 1980 .fw_dump = qla82xx_fw_dump,
1979 .beacon_on = qla82xx_beacon_on, 1981 .beacon_on = qla82xx_beacon_on,
1980 .beacon_off = qla82xx_beacon_off, 1982 .beacon_off = qla82xx_beacon_off,
1981 .beacon_blink = NULL, 1983 .beacon_blink = NULL,
@@ -2013,11 +2015,11 @@ static struct isp_operations qla8044_isp_ops = {
2013 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2015 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2014 .read_nvram = NULL, 2016 .read_nvram = NULL,
2015 .write_nvram = NULL, 2017 .write_nvram = NULL,
2016 .fw_dump = qla24xx_fw_dump, 2018 .fw_dump = qla8044_fw_dump,
2017 .beacon_on = qla82xx_beacon_on, 2019 .beacon_on = qla82xx_beacon_on,
2018 .beacon_off = qla82xx_beacon_off, 2020 .beacon_off = qla82xx_beacon_off,
2019 .beacon_blink = NULL, 2021 .beacon_blink = NULL,
2020 .read_optrom = qla82xx_read_optrom_data, 2022 .read_optrom = qla8044_read_optrom_data,
2021 .write_optrom = qla8044_write_optrom_data, 2023 .write_optrom = qla8044_write_optrom_data,
2022 .get_flash_version = qla82xx_get_flash_version, 2024 .get_flash_version = qla82xx_get_flash_version,
2023 .start_scsi = qla82xx_start_scsi, 2025 .start_scsi = qla82xx_start_scsi,
@@ -2078,7 +2080,7 @@ static struct isp_operations qlafx00_isp_ops = {
2078 .intr_handler = qlafx00_intr_handler, 2080 .intr_handler = qlafx00_intr_handler,
2079 .enable_intrs = qlafx00_enable_intrs, 2081 .enable_intrs = qlafx00_enable_intrs,
2080 .disable_intrs = qlafx00_disable_intrs, 2082 .disable_intrs = qlafx00_disable_intrs,
2081 .abort_command = qlafx00_abort_command, 2083 .abort_command = qla24xx_async_abort_command,
2082 .target_reset = qlafx00_abort_target, 2084 .target_reset = qlafx00_abort_target,
2083 .lun_reset = qlafx00_lun_reset, 2085 .lun_reset = qlafx00_lun_reset,
2084 .fabric_login = NULL, 2086 .fabric_login = NULL,
@@ -2102,6 +2104,44 @@ static struct isp_operations qlafx00_isp_ops = {
2102 .initialize_adapter = qlafx00_initialize_adapter, 2104 .initialize_adapter = qlafx00_initialize_adapter,
2103}; 2105};
2104 2106
2107static struct isp_operations qla27xx_isp_ops = {
2108 .pci_config = qla25xx_pci_config,
2109 .reset_chip = qla24xx_reset_chip,
2110 .chip_diag = qla24xx_chip_diag,
2111 .config_rings = qla24xx_config_rings,
2112 .reset_adapter = qla24xx_reset_adapter,
2113 .nvram_config = qla81xx_nvram_config,
2114 .update_fw_options = qla81xx_update_fw_options,
2115 .load_risc = qla81xx_load_risc,
2116 .pci_info_str = qla24xx_pci_info_str,
2117 .fw_version_str = qla24xx_fw_version_str,
2118 .intr_handler = qla24xx_intr_handler,
2119 .enable_intrs = qla24xx_enable_intrs,
2120 .disable_intrs = qla24xx_disable_intrs,
2121 .abort_command = qla24xx_abort_command,
2122 .target_reset = qla24xx_abort_target,
2123 .lun_reset = qla24xx_lun_reset,
2124 .fabric_login = qla24xx_login_fabric,
2125 .fabric_logout = qla24xx_fabric_logout,
2126 .calc_req_entries = NULL,
2127 .build_iocbs = NULL,
2128 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2129 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2130 .read_nvram = NULL,
2131 .write_nvram = NULL,
2132 .fw_dump = qla27xx_fwdump,
2133 .beacon_on = qla24xx_beacon_on,
2134 .beacon_off = qla24xx_beacon_off,
2135 .beacon_blink = qla83xx_beacon_blink,
2136 .read_optrom = qla25xx_read_optrom_data,
2137 .write_optrom = qla24xx_write_optrom_data,
2138 .get_flash_version = qla24xx_get_flash_version,
2139 .start_scsi = qla24xx_dif_start_scsi,
2140 .abort_isp = qla2x00_abort_isp,
2141 .iospace_config = qla83xx_iospace_config,
2142 .initialize_adapter = qla2x00_initialize_adapter,
2143};
2144
2105static inline void 2145static inline void
2106qla2x00_set_isp_flags(struct qla_hw_data *ha) 2146qla2x00_set_isp_flags(struct qla_hw_data *ha)
2107{ 2147{
@@ -2223,21 +2263,29 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
2223 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2263 case PCI_DEVICE_ID_QLOGIC_ISPF001:
2224 ha->device_type |= DT_ISPFX00; 2264 ha->device_type |= DT_ISPFX00;
2225 break; 2265 break;
2266 case PCI_DEVICE_ID_QLOGIC_ISP2071:
2267 ha->device_type |= DT_ISP2071;
2268 ha->device_type |= DT_ZIO_SUPPORTED;
2269 ha->device_type |= DT_FWI2;
2270 ha->device_type |= DT_IIDMA;
2271 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2272 break;
2226 } 2273 }
2227 2274
2228 if (IS_QLA82XX(ha)) 2275 if (IS_QLA82XX(ha))
2229 ha->port_no = !(ha->portnum & 1); 2276 ha->port_no = ha->portnum & 1;
2230 else 2277 else {
2231 /* Get adapter physical port no from interrupt pin register. */ 2278 /* Get adapter physical port no from interrupt pin register. */
2232 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2279 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2280 if (IS_QLA27XX(ha))
2281 ha->port_no--;
2282 else
2283 ha->port_no = !(ha->port_no & 1);
2284 }
2233 2285
2234 if (ha->port_no & 1)
2235 ha->flags.port0 = 1;
2236 else
2237 ha->flags.port0 = 0;
2238 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2286 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2239 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2287 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2240 ha->device_type, ha->flags.port0, ha->fw_srisc_address); 2288 ha->device_type, ha->port_no, ha->fw_srisc_address);
2241} 2289}
2242 2290
2243static void 2291static void
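The probe-time change above derives the physical port differently per family: legacy parts map the PCI interrupt pin to two ports (INTA gives port 0, INTB gives port 1, via the !(pin & 1) inversion), while ISP27xx uses pin minus one, which also extends to four-function boards. A tiny sketch of both mappings.

#include <stdio.h>

static unsigned port_no(unsigned pin, int is_qla27xx)
{
    /* pin is PCI_INTERRUPT_PIN: 1 = INTA, 2 = INTB, ... */
    return is_qla27xx ? pin - 1 : !(pin & 1);
}

int main(void)
{
    unsigned pin;

    for (pin = 1; pin <= 4; pin++)
        printf("pin %u: legacy=%u 27xx=%u\n",
            pin, port_no(pin, 0), port_no(pin, 1));
    return 0;
}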
@@ -2297,7 +2345,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2297 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2345 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2298 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2346 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2299 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2347 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2300 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) { 2348 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2349 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
2301 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2350 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2302 mem_only = 1; 2351 mem_only = 1;
2303 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2352 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2334,13 +2383,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2334 spin_lock_init(&ha->hardware_lock); 2383 spin_lock_init(&ha->hardware_lock);
2335 spin_lock_init(&ha->vport_slock); 2384 spin_lock_init(&ha->vport_slock);
2336 mutex_init(&ha->selflogin_lock); 2385 mutex_init(&ha->selflogin_lock);
2386 mutex_init(&ha->optrom_mutex);
2337 2387
2338 /* Set ISP-type information. */ 2388 /* Set ISP-type information. */
2339 qla2x00_set_isp_flags(ha); 2389 qla2x00_set_isp_flags(ha);
2340 2390
2341 /* Set EEH reset type to fundamental if required by hba */ 2391 /* Set EEH reset type to fundamental if required by hba */
2342 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2392 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2343 IS_QLA83XX(ha)) 2393 IS_QLA83XX(ha) || IS_QLA27XX(ha))
2344 pdev->needs_freset = 1; 2394 pdev->needs_freset = 1;
2345 2395
2346 ha->prev_topology = 0; 2396 ha->prev_topology = 0;
@@ -2488,7 +2538,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2488 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 2538 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
2489 req_length = REQUEST_ENTRY_CNT_FX00; 2539 req_length = REQUEST_ENTRY_CNT_FX00;
2490 rsp_length = RESPONSE_ENTRY_CNT_FX00; 2540 rsp_length = RESPONSE_ENTRY_CNT_FX00;
2491 ha->init_cb_size = sizeof(struct init_cb_fx);
2492 ha->isp_ops = &qlafx00_isp_ops; 2541 ha->isp_ops = &qlafx00_isp_ops;
2493 ha->port_down_retry_count = 30; /* default value */ 2542 ha->port_down_retry_count = 30; /* default value */
2494 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2543 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
@@ -2497,6 +2546,22 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2497 ha->mr.fw_hbt_en = 1; 2546 ha->mr.fw_hbt_en = 1;
2498 ha->mr.host_info_resend = false; 2547 ha->mr.host_info_resend = false;
2499 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 2548 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
2549 } else if (IS_QLA27XX(ha)) {
2550 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2551 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2552 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2553 req_length = REQUEST_ENTRY_CNT_24XX;
2554 rsp_length = RESPONSE_ENTRY_CNT_2300;
2555 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2556 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2557 ha->gid_list_info_size = 8;
2558 ha->optrom_size = OPTROM_SIZE_83XX;
2559 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2560 ha->isp_ops = &qla27xx_isp_ops;
2561 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2562 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2563 ha->nvram_conf_off = ~0;
2564 ha->nvram_data_off = ~0;
2500 } 2565 }
2501 2566
2502 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2567 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2536,7 +2601,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2536 ha->flags.enable_64bit_addressing ? "enable" : 2601 ha->flags.enable_64bit_addressing ? "enable" :
2537 "disable"); 2602 "disable");
2538 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2603 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2539 if (!ret) { 2604 if (ret) {
2540 ql_log_pci(ql_log_fatal, pdev, 0x0031, 2605 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2541 "Failed to allocate memory for adapter, aborting.\n"); 2606 "Failed to allocate memory for adapter, aborting.\n");
2542 2607
@@ -2561,10 +2626,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2561 2626
2562 host = base_vha->host; 2627 host = base_vha->host;
2563 base_vha->req = req; 2628 base_vha->req = req;
2564 if (IS_QLAFX00(ha))
2565 host->can_queue = QLAFX00_MAX_CANQUEUE;
2566 else
2567 host->can_queue = req->length + 128;
2568 if (IS_QLA2XXX_MIDTYPE(ha)) 2629 if (IS_QLA2XXX_MIDTYPE(ha))
2569 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 2630 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2570 else 2631 else
@@ -2587,11 +2648,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2587 if (!IS_QLA82XX(ha)) 2648 if (!IS_QLA82XX(ha))
2588 host->sg_tablesize = QLA_SG_ALL; 2649 host->sg_tablesize = QLA_SG_ALL;
2589 } 2650 }
2590 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2591 "can_queue=%d, req=%p, "
2592 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2593 host->can_queue, base_vha->req,
2594 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2595 host->max_id = ha->max_fibre_devices; 2651 host->max_id = ha->max_fibre_devices;
2596 host->cmd_per_lun = 3; 2652 host->cmd_per_lun = 3;
2597 host->unique_id = host->host_no; 2653 host->unique_id = host->host_no;
@@ -2646,7 +2702,7 @@ que_init:
2646 req->req_q_out = &ha->iobase->isp24.req_q_out; 2702 req->req_q_out = &ha->iobase->isp24.req_q_out;
2647 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 2703 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2648 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 2704 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2649 if (ha->mqenable || IS_QLA83XX(ha)) { 2705 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2650 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 2706 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2651 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 2707 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2652 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 2708 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2707,6 +2763,16 @@ que_init:
2707 goto probe_failed; 2763 goto probe_failed;
2708 } 2764 }
2709 2765
2766 if (IS_QLAFX00(ha))
2767 host->can_queue = QLAFX00_MAX_CANQUEUE;
2768 else
2769 host->can_queue = req->num_outstanding_cmds - 10;
2770
2771 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2772 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2773 host->can_queue, base_vha->req,
2774 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2775
2710 if (ha->mqenable) { 2776 if (ha->mqenable) {
2711 if (qla25xx_setup_mode(base_vha)) { 2777 if (qla25xx_setup_mode(base_vha)) {
2712 ql_log(ql_log_warn, base_vha, 0x00ec, 2778 ql_log(ql_log_warn, base_vha, 0x00ec,
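Moving the can_queue assignment below firmware initialization lets it be sized from the firmware's actual outstanding-command limit, minus headroom for internal commands, rather than a pre-init guess of req->length + 128. A sketch of the computation; the values passed in main() are illustrative.

#include <stdio.h>

#define QLAFX00_MAX_CANQUEUE 1024

static int compute_can_queue(int is_fx00, int num_outstanding_cmds)
{
    /* FX00 uses a fixed cap; everything else derives from firmware. */
    return is_fx00 ? QLAFX00_MAX_CANQUEUE : num_outstanding_cmds - 10;
}

int main(void)
{
    printf("fx00=%d isp=%d\n",
        compute_can_queue(1, 0), compute_can_queue(0, 2048));
    return 0;
}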
@@ -2887,9 +2953,9 @@ probe_hw_failed:
2887iospace_config_failed: 2953iospace_config_failed:
2888 if (IS_P3P_TYPE(ha)) { 2954 if (IS_P3P_TYPE(ha)) {
2889 if (!ha->nx_pcibase) 2955 if (!ha->nx_pcibase)
2890 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 2956 iounmap((device_reg_t *)ha->nx_pcibase);
2891 if (!ql2xdbwr) 2957 if (!ql2xdbwr)
2892 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 2958 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
2893 } else { 2959 } else {
2894 if (ha->iobase) 2960 if (ha->iobase)
2895 iounmap(ha->iobase); 2961 iounmap(ha->iobase);
@@ -3020,9 +3086,9 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
3020{ 3086{
3021 if (IS_QLA82XX(ha)) { 3087 if (IS_QLA82XX(ha)) {
3022 3088
3023 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 3089 iounmap((device_reg_t *)ha->nx_pcibase);
3024 if (!ql2xdbwr) 3090 if (!ql2xdbwr)
3025 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 3091 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3026 } else { 3092 } else {
3027 if (ha->iobase) 3093 if (ha->iobase)
3028 iounmap(ha->iobase); 3094 iounmap(ha->iobase);
@@ -3033,7 +3099,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
3033 if (ha->mqiobase) 3099 if (ha->mqiobase)
3034 iounmap(ha->mqiobase); 3100 iounmap(ha->mqiobase);
3035 3101
3036 if (IS_QLA83XX(ha) && ha->msixbase) 3102 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
3037 iounmap(ha->msixbase); 3103 iounmap(ha->msixbase);
3038 } 3104 }
3039} 3105}
@@ -3447,7 +3513,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3447 ha->npiv_info = NULL; 3513 ha->npiv_info = NULL;
3448 3514
3449 /* Get consistent memory allocated for EX-INIT-CB. */ 3515 /* Get consistent memory allocated for EX-INIT-CB. */
3450 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { 3516 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3451 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3517 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
3452 &ha->ex_init_cb_dma); 3518 &ha->ex_init_cb_dma);
3453 if (!ha->ex_init_cb) 3519 if (!ha->ex_init_cb)
@@ -3478,10 +3544,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3478 else { 3544 else {
3479 qla2x00_set_reserved_loop_ids(ha); 3545 qla2x00_set_reserved_loop_ids(ha);
3480 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3546 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3481 "loop_id_map=%p. \n", ha->loop_id_map); 3547 "loop_id_map=%p.\n", ha->loop_id_map);
3482 } 3548 }
3483 3549
3484 return 1; 3550 return 0;
3485 3551
3486fail_async_pd: 3552fail_async_pd:
3487 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3553 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -3562,22 +3628,28 @@ static void
3562qla2x00_free_fw_dump(struct qla_hw_data *ha) 3628qla2x00_free_fw_dump(struct qla_hw_data *ha)
3563{ 3629{
3564 if (ha->fce) 3630 if (ha->fce)
3565 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 3631 dma_free_coherent(&ha->pdev->dev,
3566 ha->fce_dma); 3632 FCE_SIZE, ha->fce, ha->fce_dma);
3567 3633
3568 if (ha->fw_dump) { 3634 if (ha->eft)
3569 if (ha->eft) 3635 dma_free_coherent(&ha->pdev->dev,
3570 dma_free_coherent(&ha->pdev->dev, 3636 EFT_SIZE, ha->eft, ha->eft_dma);
3571 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 3637
3638 if (ha->fw_dump)
3572 vfree(ha->fw_dump); 3639 vfree(ha->fw_dump);
3573 } 3640 if (ha->fw_dump_template)
3641 vfree(ha->fw_dump_template);
3642
3574 ha->fce = NULL; 3643 ha->fce = NULL;
3575 ha->fce_dma = 0; 3644 ha->fce_dma = 0;
3576 ha->eft = NULL; 3645 ha->eft = NULL;
3577 ha->eft_dma = 0; 3646 ha->eft_dma = 0;
3578 ha->fw_dump = NULL;
3579 ha->fw_dumped = 0; 3647 ha->fw_dumped = 0;
3580 ha->fw_dump_reading = 0; 3648 ha->fw_dump_reading = 0;
3649 ha->fw_dump = NULL;
3650 ha->fw_dump_len = 0;
3651 ha->fw_dump_template = NULL;
3652 ha->fw_dump_template_len = 0;
3581} 3653}
3582 3654
3583/* 3655/*
@@ -5242,7 +5314,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5242 5314
5243/* Firmware interface routines. */ 5315/* Firmware interface routines. */
5244 5316
5245#define FW_BLOBS 10 5317#define FW_BLOBS 11
5246#define FW_ISP21XX 0 5318#define FW_ISP21XX 0
5247#define FW_ISP22XX 1 5319#define FW_ISP22XX 1
5248#define FW_ISP2300 2 5320#define FW_ISP2300 2
@@ -5253,6 +5325,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5253#define FW_ISP82XX 7 5325#define FW_ISP82XX 7
5254#define FW_ISP2031 8 5326#define FW_ISP2031 8
5255#define FW_ISP8031 9 5327#define FW_ISP8031 9
5328#define FW_ISP2071 10
5256 5329
5257#define FW_FILE_ISP21XX "ql2100_fw.bin" 5330#define FW_FILE_ISP21XX "ql2100_fw.bin"
5258#define FW_FILE_ISP22XX "ql2200_fw.bin" 5331#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5264,6 +5337,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
5264#define FW_FILE_ISP82XX "ql8200_fw.bin" 5337#define FW_FILE_ISP82XX "ql8200_fw.bin"
5265#define FW_FILE_ISP2031 "ql2600_fw.bin" 5338#define FW_FILE_ISP2031 "ql2600_fw.bin"
5266#define FW_FILE_ISP8031 "ql8300_fw.bin" 5339#define FW_FILE_ISP8031 "ql8300_fw.bin"
5340#define FW_FILE_ISP2071 "ql2700_fw.bin"
5341
5267 5342
5268static DEFINE_MUTEX(qla_fw_lock); 5343static DEFINE_MUTEX(qla_fw_lock);
5269 5344
@@ -5278,6 +5353,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
5278 { .name = FW_FILE_ISP82XX, }, 5353 { .name = FW_FILE_ISP82XX, },
5279 { .name = FW_FILE_ISP2031, }, 5354 { .name = FW_FILE_ISP2031, },
5280 { .name = FW_FILE_ISP8031, }, 5355 { .name = FW_FILE_ISP8031, },
5356 { .name = FW_FILE_ISP2071, },
5281}; 5357};
5282 5358
5283struct fw_blob * 5359struct fw_blob *
@@ -5306,6 +5382,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
5306 blob = &qla_fw_blobs[FW_ISP2031]; 5382 blob = &qla_fw_blobs[FW_ISP2031];
5307 } else if (IS_QLA8031(ha)) { 5383 } else if (IS_QLA8031(ha)) {
5308 blob = &qla_fw_blobs[FW_ISP8031]; 5384 blob = &qla_fw_blobs[FW_ISP8031];
5385 } else if (IS_QLA2071(ha)) {
5386 blob = &qla_fw_blobs[FW_ISP2071];
5309 } else { 5387 } else {
5310 return NULL; 5388 return NULL;
5311 } 5389 }
@@ -5635,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5635 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 5713 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
5636 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 5714 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
5637 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 5715 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
5716 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
5638 { 0 }, 5717 { 0 },
5639}; 5718};
5640MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5719MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
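Adding the ISP2071 firmware image touches three places that must stay in sync: the FW_BLOBS count, the FW_ISP2071 index, and the blob table initializer. A standalone sketch with a compile-time guard on the count; the file names are copied from the hunk, everything else is illustrative.

#include <stdio.h>

#define FW_BLOBS   3
#define FW_ISP2031 0
#define FW_ISP8031 1
#define FW_ISP2071 2

struct fw_blob {
    const char *name;
};

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
    { .name = "ql2600_fw.bin" },
    { .name = "ql8300_fw.bin" },
    { .name = "ql2700_fw.bin" },      /* the new ISP2071 image */
};

/* Fails the build if the index and the count ever drift apart. */
_Static_assert(FW_ISP2071 < FW_BLOBS, "fw blob index out of range");

int main(void)
{
    printf("27xx firmware: %s\n", qla_fw_blobs[FW_ISP2071].name);
    return 0;
}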
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bd56cde795fc..f28123e8ed65 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,7 +568,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
568 else if (IS_P3P_TYPE(ha)) { 568 else if (IS_P3P_TYPE(ha)) {
569 *start = FA_FLASH_LAYOUT_ADDR_82; 569 *start = FA_FLASH_LAYOUT_ADDR_82;
570 goto end; 570 goto end;
571 } else if (IS_QLA83XX(ha)) { 571 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
572 *start = FA_FLASH_LAYOUT_ADDR_83; 572 *start = FA_FLASH_LAYOUT_ADDR_83;
573 goto end; 573 goto end;
574 } 574 }
@@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
682 /* Assign FCP prio region since older adapters may not have FLT, or 682 /* Assign FCP prio region since older adapters may not have FLT, or
683 FCP prio region in its FLT. 683 FCP prio region in its FLT.
684 */ 684 */
685 ha->flt_region_fcp_prio = ha->flags.port0 ? 685 ha->flt_region_fcp_prio = (ha->port_no == 0) ?
686 fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; 686 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
687 687
688 ha->flt_region_flt = flt_addr; 688 ha->flt_region_flt = flt_addr;
@@ -743,47 +743,71 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
743 ha->flt_region_vpd_nvram = start; 743 ha->flt_region_vpd_nvram = start;
744 if (IS_P3P_TYPE(ha)) 744 if (IS_P3P_TYPE(ha))
745 break; 745 break;
746 if (ha->flags.port0) 746 if (ha->port_no == 0)
747 ha->flt_region_vpd = start; 747 ha->flt_region_vpd = start;
748 break; 748 break;
749 case FLT_REG_VPD_1: 749 case FLT_REG_VPD_1:
750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
751 break; 751 break;
752 if (!ha->flags.port0) 752 if (ha->port_no == 1)
753 ha->flt_region_vpd = start;
754 break;
755 case FLT_REG_VPD_2:
756 if (!IS_QLA27XX(ha))
757 break;
758 if (ha->port_no == 2)
759 ha->flt_region_vpd = start;
760 break;
761 case FLT_REG_VPD_3:
762 if (!IS_QLA27XX(ha))
763 break;
764 if (ha->port_no == 3)
753 ha->flt_region_vpd = start; 765 ha->flt_region_vpd = start;
754 break; 766 break;
755 case FLT_REG_NVRAM_0: 767 case FLT_REG_NVRAM_0:
756 if (IS_QLA8031(ha)) 768 if (IS_QLA8031(ha))
757 break; 769 break;
758 if (ha->flags.port0) 770 if (ha->port_no == 0)
759 ha->flt_region_nvram = start; 771 ha->flt_region_nvram = start;
760 break; 772 break;
761 case FLT_REG_NVRAM_1: 773 case FLT_REG_NVRAM_1:
762 if (IS_QLA8031(ha)) 774 if (IS_QLA8031(ha))
763 break; 775 break;
764 if (!ha->flags.port0) 776 if (ha->port_no == 1)
777 ha->flt_region_nvram = start;
778 break;
779 case FLT_REG_NVRAM_2:
780 if (!IS_QLA27XX(ha))
781 break;
782 if (ha->port_no == 2)
783 ha->flt_region_nvram = start;
784 break;
785 case FLT_REG_NVRAM_3:
786 if (!IS_QLA27XX(ha))
787 break;
788 if (ha->port_no == 3)
765 ha->flt_region_nvram = start; 789 ha->flt_region_nvram = start;
766 break; 790 break;
767 case FLT_REG_FDT: 791 case FLT_REG_FDT:
768 ha->flt_region_fdt = start; 792 ha->flt_region_fdt = start;
769 break; 793 break;
770 case FLT_REG_NPIV_CONF_0: 794 case FLT_REG_NPIV_CONF_0:
771 if (ha->flags.port0) 795 if (ha->port_no == 0)
772 ha->flt_region_npiv_conf = start; 796 ha->flt_region_npiv_conf = start;
773 break; 797 break;
774 case FLT_REG_NPIV_CONF_1: 798 case FLT_REG_NPIV_CONF_1:
775 if (!ha->flags.port0) 799 if (ha->port_no == 1)
776 ha->flt_region_npiv_conf = start; 800 ha->flt_region_npiv_conf = start;
777 break; 801 break;
778 case FLT_REG_GOLD_FW: 802 case FLT_REG_GOLD_FW:
779 ha->flt_region_gold_fw = start; 803 ha->flt_region_gold_fw = start;
780 break; 804 break;
781 case FLT_REG_FCP_PRIO_0: 805 case FLT_REG_FCP_PRIO_0:
782 if (ha->flags.port0) 806 if (ha->port_no == 0)
783 ha->flt_region_fcp_prio = start; 807 ha->flt_region_fcp_prio = start;
784 break; 808 break;
785 case FLT_REG_FCP_PRIO_1: 809 case FLT_REG_FCP_PRIO_1:
786 if (!ha->flags.port0) 810 if (ha->port_no == 1)
787 ha->flt_region_fcp_prio = start; 811 ha->flt_region_fcp_prio = start;
788 break; 812 break;
789 case FLT_REG_BOOT_CODE_82XX: 813 case FLT_REG_BOOT_CODE_82XX:
@@ -813,13 +837,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
813 case FLT_REG_FCOE_NVRAM_0: 837 case FLT_REG_FCOE_NVRAM_0:
814 if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) 838 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
815 break; 839 break;
816 if (ha->flags.port0) 840 if (ha->port_no == 0)
817 ha->flt_region_nvram = start; 841 ha->flt_region_nvram = start;
818 break; 842 break;
819 case FLT_REG_FCOE_NVRAM_1: 843 case FLT_REG_FCOE_NVRAM_1:
820 if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) 844 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
821 break; 845 break;
822 if (!ha->flags.port0) 846 if (ha->port_no == 1)
823 ha->flt_region_nvram = start; 847 ha->flt_region_nvram = start;
824 break; 848 break;
825 } 849 }
@@ -832,12 +856,12 @@ no_flash_data:
832 ha->flt_region_fw = def_fw[def]; 856 ha->flt_region_fw = def_fw[def];
833 ha->flt_region_boot = def_boot[def]; 857 ha->flt_region_boot = def_boot[def];
834 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 858 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
835 ha->flt_region_vpd = ha->flags.port0 ? 859 ha->flt_region_vpd = (ha->port_no == 0) ?
836 def_vpd0[def] : def_vpd1[def]; 860 def_vpd0[def] : def_vpd1[def];
837 ha->flt_region_nvram = ha->flags.port0 ? 861 ha->flt_region_nvram = (ha->port_no == 0) ?
838 def_nvram0[def] : def_nvram1[def]; 862 def_nvram0[def] : def_nvram1[def];
839 ha->flt_region_fdt = def_fdt[def]; 863 ha->flt_region_fdt = def_fdt[def];
840 ha->flt_region_npiv_conf = ha->flags.port0 ? 864 ha->flt_region_npiv_conf = (ha->port_no == 0) ?
841 def_npiv_conf0[def] : def_npiv_conf1[def]; 865 def_npiv_conf0[def] : def_npiv_conf1[def];
842done: 866done:
843 ql_dbg(ql_dbg_init, vha, 0x004a, 867 ql_dbg(ql_dbg_init, vha, 0x004a,
@@ -989,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
989 struct qla_hw_data *ha = vha->hw; 1013 struct qla_hw_data *ha = vha->hw;
990 1014
991 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && 1015 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
992 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1016 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
993 return QLA_SUCCESS; 1017 return QLA_SUCCESS;
994 1018
995 ret = qla2xxx_find_flt_start(vha, &flt_addr); 1019 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1192,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1192 struct qla_hw_data *ha = vha->hw; 1216 struct qla_hw_data *ha = vha->hw;
1193 1217
1194 /* Prepare burst-capable write on supported ISPs. */ 1218 /* Prepare burst-capable write on supported ISPs. */
1195 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) && 1219 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1220 IS_QLA27XX(ha)) &&
1196 !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { 1221 !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
1197 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1222 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1198 &optrom_dma, GFP_KERNEL); 1223 &optrom_dma, GFP_KERNEL);
@@ -1675,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1675 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha))
1676 goto out; 1701 goto out;
1677 1702
1678 if (ha->flags.port0) 1703 if (ha->port_no == 0)
1679 led_select_value = QLA83XX_LED_PORT0; 1704 led_select_value = QLA83XX_LED_PORT0;
1680 else 1705 else
1681 led_select_value = QLA83XX_LED_PORT1; 1706 led_select_value = QLA83XX_LED_PORT1;
@@ -2332,7 +2357,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2332 */ 2357 */
2333 rest_addr = 0xffff; 2358 rest_addr = 0xffff;
2334 sec_mask = 0x10000; 2359 sec_mask = 0x10000;
2335 break; 2360 break;
2336 } 2361 }
2337 /* 2362 /*
2338 * ST m29w010b part - 16kb sector size 2363 * ST m29w010b part - 16kb sector size
@@ -2558,7 +2583,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2558 uint32_t faddr, left, burst; 2583 uint32_t faddr, left, burst;
2559 struct qla_hw_data *ha = vha->hw; 2584 struct qla_hw_data *ha = vha->hw;
2560 2585
2561 if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2586 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
2562 goto try_fast; 2587 goto try_fast;
2563 if (offset & 0xfff) 2588 if (offset & 0xfff)
2564 goto slow_read; 2589 goto slow_read;
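The FLT walker in qla_sup.c dispatches each per-port region entry to the matching PCI function, so a four-port ISP27xx needs the new VPD/NVRAM _2/_3 cases that two-port parts skip; the port test is now an explicit port_no comparison instead of the old boolean port0 flag. A condensed model of that dispatch, with illustrative region IDs.

#include <stdio.h>

#define FLT_REG_NVRAM_0 0
#define FLT_REG_NVRAM_1 1
#define FLT_REG_NVRAM_2 2             /* 27xx-only: third function */
#define FLT_REG_NVRAM_3 3             /* 27xx-only: fourth function */

/* Returns nonzero if this function should claim the region entry. */
static int region_is_mine(int reg, unsigned port_no, int is_qla27xx)
{
    switch (reg) {
    case FLT_REG_NVRAM_0: return port_no == 0;
    case FLT_REG_NVRAM_1: return port_no == 1;
    case FLT_REG_NVRAM_2: return is_qla27xx && port_no == 2;
    case FLT_REG_NVRAM_3: return is_qla27xx && port_no == 3;
    }
    return 0;
}

int main(void)
{
    printf("port2 takes NVRAM_2 on 27xx: %d, on 83xx: %d\n",
        region_is_mine(FLT_REG_NVRAM_2, 2, 1),
        region_is_mine(FLT_REG_NVRAM_2, 2, 0));
    return 0;
}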
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 000000000000..a804e9b744bb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,909 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include "qla_tmpl.h"
9
10/* note default template is in big endian */
11static const uint32_t ql27xx_fwdt_default_template[] = {
12 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
13 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
14 0x00000000, 0x00000000, 0x00000000, 0x00000000,
15 0x00000000, 0x00000000, 0x00000000, 0x00000000,
16 0x00000000, 0x00000000, 0x00000000, 0x00000000,
17 0x00000000, 0x00000000, 0x00000000, 0x00000000,
18 0x00000000, 0x00000000, 0x00000000, 0x00000000,
19 0x00000000, 0x00000000, 0x00000000, 0x00000000,
20 0x00000000, 0x00000000, 0x00000000, 0x00000000,
21 0x00000000, 0x00000000, 0x00000000, 0x00000000,
22 0x00000000, 0x04010000, 0x14000000, 0x00000000,
23 0x02000000, 0x44000000, 0x09010000, 0x10000000,
24 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
25 0x00000000, 0x02000000, 0x00600000, 0x00000000,
26 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
27 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
28 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
29 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
30 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
31 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
32 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
33 0x00010000, 0x18000000, 0x00000000, 0x02000000,
34 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
35 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
36 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
37 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
38 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
39 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
40 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
41 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
42 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
43 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
44 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
45 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
46 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
47 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
48 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
49 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
50 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
51 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
52 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
53 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
54 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
55 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
56 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
57 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
58 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
59 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
60 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
61 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
62 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
63 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
64 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
65 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
66 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
67 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
68 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
69 0x00010000, 0x18000000, 0x00000000, 0x02000000,
70 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
71 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
72 0x00010000, 0x18000000, 0x00000000, 0x02000000,
73 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
74 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
75 0x00010000, 0x18000000, 0x00000000, 0x02000000,
76 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
77 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
78 0x00010000, 0x18000000, 0x00000000, 0x02000000,
79 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
80 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
81 0x00010000, 0x18000000, 0x00000000, 0x02000000,
82 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
83 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
84 0x00010000, 0x18000000, 0x00000000, 0x02000000,
85 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
86 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
87 0x00010000, 0x18000000, 0x00000000, 0x02000000,
88 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
89 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
90 0x00000000, 0x02000000, 0x01000000, 0x00000200,
91 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
92 0x02000000, 0x02000000, 0x00001000, 0x00000000,
93 0x07010000, 0x18000000, 0x00000000, 0x02000000,
94 0x00000000, 0x01000000, 0x07010000, 0x18000000,
95 0x00000000, 0x02000000, 0x00000000, 0x02000000,
96 0x07010000, 0x18000000, 0x00000000, 0x02000000,
97 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
98 0x00000000, 0x02000000, 0x00000000, 0xff000000,
99 0x10000000, 0x00000000, 0x00000080,
100};
101
102static inline void __iomem *
103qla27xx_isp_reg(struct scsi_qla_host *vha)
104{
105 return &vha->hw->iobase->isp24;
106}
107
108static inline void
109qla27xx_insert16(uint16_t value, void *buf, ulong *len)
110{
111 if (buf) {
112 buf += *len;
113 *(__le16 *)buf = cpu_to_le16(value);
114 }
115 *len += sizeof(value);
116}
117
118static inline void
119qla27xx_insert32(uint32_t value, void *buf, ulong *len)
120{
121 if (buf) {
122 buf += *len;
123 *(__le32 *)buf = cpu_to_le32(value);
124 }
125 *len += sizeof(value);
126}
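/*
 * A minimal stand-alone sketch (not driver code) of the two-pass idiom the
 * insert helpers above enable: buf == NULL still advances *len, so the same
 * sequence of calls can size the dump first and fill it afterwards. Endian
 * conversion (cpu_to_le32) is skipped here for brevity.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void insert32(uint32_t value, void *buf, unsigned long *len)
{
	if (buf)				/* capture pass: store at the running offset */
		memcpy((char *)buf + *len, &value, sizeof(value));
	*len += sizeof(value);			/* the sizing pass advances *len too */
}

int main(void)
{
	unsigned long len = 0;
	void *buf;

	insert32(0xdeadbeef, NULL, &len);	/* pass 1: buf == NULL, size only */
	insert32(0xcafef00d, NULL, &len);
	buf = malloc(len);
	len = 0;
	insert32(0xdeadbeef, buf, &len);	/* pass 2: same calls, now storing */
	insert32(0xcafef00d, buf, &len);
	printf("captured %lu bytes\n", len);	/* -> captured 8 bytes */
	free(buf);
	return 0;
}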
127
128static inline void
129qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
130{
131 ulong cnt = size;
132
133 if (buf && mem) {
134 buf += *len;
135 while (cnt >= sizeof(uint32_t)) {
136 *(__le32 *)buf = cpu_to_le32p(mem);
137 buf += sizeof(uint32_t);
138 mem += sizeof(uint32_t);
139 cnt -= sizeof(uint32_t);
140 }
141 if (cnt)
142 memcpy(buf, mem, cnt);
143 }
144 *len += size;
145}
146
147static inline void
148qla27xx_read8(void *window, void *buf, ulong *len)
149{
150 uint8_t value = ~0;
151
152 if (buf) {
153 value = RD_REG_BYTE((__iomem void *)window);
154 ql_dbg(ql_dbg_misc, NULL, 0xd011,
155 "%s: -> %x\n", __func__, value);
156 }
157 qla27xx_insert32(value, buf, len);
158}
159
160static inline void
161qla27xx_read16(void *window, void *buf, ulong *len)
162{
163 uint16_t value = ~0;
164
165 if (buf) {
166 value = RD_REG_WORD((__iomem void *)window);
167 ql_dbg(ql_dbg_misc, NULL, 0xd012,
168 "%s: -> %x\n", __func__, value);
169 }
170 qla27xx_insert32(value, buf, len);
171}
172
173static inline void
174qla27xx_read32(void *window, void *buf, ulong *len)
175{
176 uint32_t value = ~0;
177
178 if (buf) {
179 value = RD_REG_DWORD((__iomem void *)window);
180 ql_dbg(ql_dbg_misc, NULL, 0xd013,
181 "%s: -> %x\n", __func__, value);
182 }
183 qla27xx_insert32(value, buf, len);
184}
185
186static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
187{
188 return
189 (width == 1) ? qla27xx_read8 :
190 (width == 2) ? qla27xx_read16 :
191 qla27xx_read32;
192}
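/*
 * Stand-alone sketch of the width dispatch above: widths 1 and 2 select the
 * narrow accessors, anything else (including the usual 4) falls through to
 * the 32-bit reader. All names below are illustrative, not driver API.
 */
#include <stdio.h>

static void read8(void *w, void *b, unsigned long *l)  { (void)w; (void)b; (void)l; puts("8-bit");  }
static void read16(void *w, void *b, unsigned long *l) { (void)w; (void)b; (void)l; puts("16-bit"); }
static void read32(void *w, void *b, unsigned long *l) { (void)w; (void)b; (void)l; puts("32-bit"); }

static void (*read_vector(unsigned int width))(void *, void *, unsigned long *)
{
	return (width == 1) ? read8 :
	       (width == 2) ? read16 :
	       read32;
}

int main(void)
{
	read_vector(1)(NULL, NULL, NULL);	/* -> 8-bit */
	read_vector(4)(NULL, NULL, NULL);	/* -> 32-bit */
	return 0;
}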
193
194static inline void
195qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
196 uint offset, void *buf, ulong *len)
197{
198 void *window = (void *)reg + offset;
199
200 if (buf) {
201 ql_dbg(ql_dbg_misc, NULL, 0xd014,
202 "%s: @%x\n", __func__, offset);
203 }
204 qla27xx_insert32(offset, buf, len);
205 qla27xx_read32(window, buf, len);
206}
207
208static inline void
209qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
210 uint offset, uint32_t data, void *buf)
211{
 212	__iomem void *window = (void __iomem *)reg + offset;
213
214 if (buf) {
215 ql_dbg(ql_dbg_misc, NULL, 0xd015,
216 "%s: @%x <- %x\n", __func__, offset, data);
217 WRT_REG_DWORD(window, data);
218 }
219}
220
221static inline void
222qla27xx_read_window(__iomem struct device_reg_24xx *reg,
223 uint32_t base, uint offset, uint count, uint width, void *buf,
224 ulong *len)
225{
226 void *window = (void *)reg + offset;
227 void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
228
229 if (buf) {
230 ql_dbg(ql_dbg_misc, NULL, 0xd016,
231 "%s: base=%x offset=%x count=%x width=%x\n",
232 __func__, base, offset, count, width);
233 }
234 qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
235 while (count--) {
236 qla27xx_insert32(base, buf, len);
237 readn(window, buf, len);
238 window += width;
239 base += width;
240 }
241}
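/*
 * How a windowed read lays out in the dump (simulation; regs[] stands in for
 * the mapped BAR and all values are hypothetical): after the base is latched
 * into IOBASE_ADDR, each register is recorded as an (address, value) pair,
 * with window and base both advancing by the operand width -- so every
 * captured register costs two dwords of dump space.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t regs[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t base = 0x7c00;			/* hypothetical window base */
	unsigned int count = 4, width = 4;

	for (unsigned int i = 0; i < count; i++) {
		printf("%08x: %08x\n", base, regs[i]);	/* address, then value */
		base += width;
	}
	return 0;
}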
242
243static inline void
244qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
245{
246 if (buf)
247 ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
248}
249
250static int
251qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
252 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
253{
254 ql_dbg(ql_dbg_misc, vha, 0xd100,
255 "%s: nop [%lx]\n", __func__, *len);
256 qla27xx_skip_entry(ent, buf);
257
258 return false;
259}
260
261static int
262qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
263 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
264{
265 ql_dbg(ql_dbg_misc, vha, 0xd1ff,
266 "%s: end [%lx]\n", __func__, *len);
267 qla27xx_skip_entry(ent, buf);
268
269 /* terminate */
270 return true;
271}
272
273static int
274qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
275 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
276{
277 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
278
279 ql_dbg(ql_dbg_misc, vha, 0xd200,
280 "%s: rdio t1 [%lx]\n", __func__, *len);
281 qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
282 ent->t256.reg_count, ent->t256.reg_width, buf, len);
283
284 return false;
285}
286
287static int
288qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
289 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
290{
291 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
292
293 ql_dbg(ql_dbg_misc, vha, 0xd201,
294 "%s: wrio t1 [%lx]\n", __func__, *len);
295 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
296 qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
297
298 return false;
299}
300
301static int
302qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
303 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
304{
305 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
306
307 ql_dbg(ql_dbg_misc, vha, 0xd202,
308 "%s: rdio t2 [%lx]\n", __func__, *len);
309 qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
310 qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
311 ent->t258.reg_count, ent->t258.reg_width, buf, len);
312
313 return false;
314}
315
316static int
317qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
318 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
319{
320 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
321
322 ql_dbg(ql_dbg_misc, vha, 0xd203,
323 "%s: wrio t2 [%lx]\n", __func__, *len);
324 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
325 qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
326 qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
327
328 return false;
329}
330
331static int
332qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
333 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
334{
335 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
336
337 ql_dbg(ql_dbg_misc, vha, 0xd204,
338 "%s: rdpci [%lx]\n", __func__, *len);
339 qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
340
341 return false;
342}
343
344static int
345qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
346 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
347{
348 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
349
350 ql_dbg(ql_dbg_misc, vha, 0xd205,
351 "%s: wrpci [%lx]\n", __func__, *len);
352 qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
353
354 return false;
355}
356
357static int
358qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
359 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
360{
361 ulong dwords;
362 ulong start;
363 ulong end;
364
365 ql_dbg(ql_dbg_misc, vha, 0xd206,
366 "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
367 start = ent->t262.start_addr;
368 end = ent->t262.end_addr;
369
370 if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
371 ;
372 } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
373 end = vha->hw->fw_memory_size;
374 if (buf)
375 ent->t262.end_addr = end;
376 } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
377 start = vha->hw->fw_shared_ram_start;
378 end = vha->hw->fw_shared_ram_end;
379 if (buf) {
380 ent->t262.start_addr = start;
381 ent->t262.end_addr = end;
382 }
383 } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
384 ql_dbg(ql_dbg_misc, vha, 0xd021,
385 "%s: unsupported ddr ram\n", __func__);
386 qla27xx_skip_entry(ent, buf);
387 goto done;
388 } else {
389 ql_dbg(ql_dbg_misc, vha, 0xd022,
390 "%s: unknown area %u\n", __func__, ent->t262.ram_area);
391 qla27xx_skip_entry(ent, buf);
392 goto done;
393 }
394
395 if (end < start) {
396 ql_dbg(ql_dbg_misc, vha, 0xd023,
397 "%s: bad range (start=%x end=%x)\n", __func__,
 398		    ent->t262.start_addr, ent->t262.end_addr);
399 qla27xx_skip_entry(ent, buf);
400 goto done;
401 }
402
403 dwords = end - start + 1;
404 if (buf) {
405 ql_dbg(ql_dbg_misc, vha, 0xd024,
406 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
407 buf += *len;
408 qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
409 }
410 *len += dwords * sizeof(uint32_t);
411done:
412 return false;
413}
414
415static int
416qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
417 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
418{
419 uint count = 0;
420 uint i;
421 uint length;
422
423 ql_dbg(ql_dbg_misc, vha, 0xd207,
424 "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
425 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
426 for (i = 0; i < vha->hw->max_req_queues; i++) {
427 struct req_que *req = vha->hw->req_q_map[i];
428 if (req || !buf) {
429 length = req ?
430 req->length : REQUEST_ENTRY_CNT_24XX;
431 qla27xx_insert16(i, buf, len);
432 qla27xx_insert16(length, buf, len);
433 qla27xx_insertbuf(req ? req->ring : NULL,
434 length * sizeof(*req->ring), buf, len);
435 count++;
436 }
437 }
438 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
439 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
440 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
441 if (rsp || !buf) {
442 length = rsp ?
443 rsp->length : RESPONSE_ENTRY_CNT_MQ;
444 qla27xx_insert16(i, buf, len);
445 qla27xx_insert16(length, buf, len);
446 qla27xx_insertbuf(rsp ? rsp->ring : NULL,
447 length * sizeof(*rsp->ring), buf, len);
448 count++;
449 }
450 }
451 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
452 ql_dbg(ql_dbg_misc, vha, 0xd025,
453 "%s: unsupported atio queue\n", __func__);
454 qla27xx_skip_entry(ent, buf);
455 goto done;
456 } else {
457 ql_dbg(ql_dbg_misc, vha, 0xd026,
458 "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
459 qla27xx_skip_entry(ent, buf);
460 goto done;
461 }
462
463 if (buf)
464 ent->t263.num_queues = count;
465done:
466 return false;
467}
468
469static int
470qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
471 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
472{
473 ql_dbg(ql_dbg_misc, vha, 0xd208,
474 "%s: getfce [%lx]\n", __func__, *len);
475 if (vha->hw->fce) {
476 if (buf) {
477 ent->t264.fce_trace_size = FCE_SIZE;
478 ent->t264.write_pointer = vha->hw->fce_wr;
479 ent->t264.base_pointer = vha->hw->fce_dma;
480 ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
481 ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
482 ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
483 ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
484 ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
485 ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
486 }
487 qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
488 } else {
489 ql_dbg(ql_dbg_misc, vha, 0xd027,
490 "%s: missing fce\n", __func__);
491 qla27xx_skip_entry(ent, buf);
492 }
493
494 return false;
495}
496
497static int
498qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
499 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
500{
501 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
502
503 ql_dbg(ql_dbg_misc, vha, 0xd209,
504 "%s: pause risc [%lx]\n", __func__, *len);
505 if (buf)
506 qla24xx_pause_risc(reg);
507
508 return false;
509}
510
511static int
512qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
513 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
514{
515 ql_dbg(ql_dbg_misc, vha, 0xd20a,
516 "%s: reset risc [%lx]\n", __func__, *len);
517 if (buf)
518 qla24xx_soft_reset(vha->hw);
519
520 return false;
521}
522
523static int
524qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
525 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
526{
527 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
528
529 ql_dbg(ql_dbg_misc, vha, 0xd20b,
530 "%s: dis intr [%lx]\n", __func__, *len);
531 qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
532
533 return false;
534}
535
536static int
537qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
538 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
539{
540 ql_dbg(ql_dbg_misc, vha, 0xd20c,
541 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
542 if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
543 if (vha->hw->eft) {
544 if (buf) {
545 ent->t268.buf_size = EFT_SIZE;
546 ent->t268.start_addr = vha->hw->eft_dma;
547 }
548 qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
549 } else {
550 ql_dbg(ql_dbg_misc, vha, 0xd028,
551 "%s: missing eft\n", __func__);
552 qla27xx_skip_entry(ent, buf);
553 }
554 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
555 ql_dbg(ql_dbg_misc, vha, 0xd029,
556 "%s: unsupported exchange offload buffer\n", __func__);
557 qla27xx_skip_entry(ent, buf);
558 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
559 ql_dbg(ql_dbg_misc, vha, 0xd02a,
560 "%s: unsupported extended login buffer\n", __func__);
561 qla27xx_skip_entry(ent, buf);
562 } else {
563 ql_dbg(ql_dbg_misc, vha, 0xd02b,
564 "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
565 qla27xx_skip_entry(ent, buf);
566 }
567
568 return false;
569}
570
571static int
572qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
573 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
574{
575 ql_dbg(ql_dbg_misc, vha, 0xd20d,
576 "%s: scratch [%lx]\n", __func__, *len);
577 qla27xx_insert32(0xaaaaaaaa, buf, len);
578 qla27xx_insert32(0xbbbbbbbb, buf, len);
579 qla27xx_insert32(0xcccccccc, buf, len);
580 qla27xx_insert32(0xdddddddd, buf, len);
581 qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
582 if (buf)
583 ent->t269.scratch_size = 5 * sizeof(uint32_t);
584
585 return false;
586}
587
588static int
589qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
590 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
591{
592 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
593 void *window = (void *)reg + 0xc4;
594 ulong dwords = ent->t270.count;
595 ulong addr = ent->t270.addr;
596
597 ql_dbg(ql_dbg_misc, vha, 0xd20e,
598 "%s: rdremreg [%lx]\n", __func__, *len);
599 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
600 while (dwords--) {
601 qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
602 qla27xx_read_reg(reg, 0xc4, buf, len);
603 qla27xx_insert32(addr, buf, len);
604 qla27xx_read32(window, buf, len);
605 addr++;
606 }
607
608 return false;
609}
610
611static int
612qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
613 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
614{
615 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
616 ulong addr = ent->t271.addr;
617
618 ql_dbg(ql_dbg_misc, vha, 0xd20f,
619 "%s: wrremreg [%lx]\n", __func__, *len);
620 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
621 qla27xx_read_reg(reg, 0xc4, buf, len);
622 qla27xx_insert32(addr, buf, len);
623 qla27xx_write_reg(reg, 0xc0, addr, buf);
624
625 return false;
626}
627
628static int
629qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
630 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
631{
632 ulong dwords = ent->t272.count;
633 ulong start = ent->t272.addr;
634
635 ql_dbg(ql_dbg_misc, vha, 0xd210,
636 "%s: rdremram [%lx]\n", __func__, *len);
637 if (buf) {
638 ql_dbg(ql_dbg_misc, vha, 0xd02c,
639 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
640 buf += *len;
641 qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
642 }
643 *len += dwords * sizeof(uint32_t);
644
645 return false;
646}
647
648static int
649qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
650 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
651{
652 ulong dwords = ent->t273.count;
653 ulong addr = ent->t273.addr;
654 uint32_t value;
655
656 ql_dbg(ql_dbg_misc, vha, 0xd211,
657 "%s: pcicfg [%lx]\n", __func__, *len);
658 while (dwords--) {
659 value = ~0;
660 if (pci_read_config_dword(vha->hw->pdev, addr, &value))
661 ql_dbg(ql_dbg_misc, vha, 0xd02d,
662 "%s: failed pcicfg read at %lx\n", __func__, addr);
663 qla27xx_insert32(addr, buf, len);
664 qla27xx_insert32(value, buf, len);
665 addr += 4;
666 }
667
668 return false;
669}
670
671static int
672qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
673 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
674{
675 ql_dbg(ql_dbg_misc, vha, 0xd2ff,
676 "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
677 qla27xx_skip_entry(ent, buf);
678
679 return false;
680}
681
682struct qla27xx_fwdt_entry_call {
683 int type;
684 int (*call)(
685 struct scsi_qla_host *,
686 struct qla27xx_fwdt_entry *,
687 void *,
688 ulong *);
689};
690
691static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
692 { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
693 { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
694 { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
695 { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
696 { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
697 { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
698 { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
699 { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
700 { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
701 { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
702 { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
703 { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
704 { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
705 { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
706 { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
707 { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
708 { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
709 { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
710 { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
711 { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
712 { -1 , qla27xx_fwdt_entry_other }
713};
714
715static inline int (*qla27xx_find_entry(int type))
716 (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
717{
718 struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
719
720 while (list->type != -1 && list->type != type)
721 list++;
722
723 return list->call;
724}
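/*
 * Stand-alone sketch of the sentinel lookup above: the { -1, ...entry_other }
 * terminator means the linear scan can never run off the list -- an unknown
 * type simply resolves to the catch-all handler. Names are illustrative.
 */
#include <stdio.h>

struct call { int type; const char *(*fn)(void); };

static const char *nop(void)   { return "nop"; }
static const char *other(void) { return "other"; }

static struct call list[] = {
	{ 0,  nop },
	{ -1, other },			/* sentinel doubles as the fallback */
};

static const char *(*find(int type))(void)
{
	struct call *p = list;

	while (p->type != -1 && p->type != type)
		p++;
	return p->fn;
}

int main(void)
{
	printf("%s %s\n", find(0)(), find(999)());	/* -> nop other */
	return 0;
}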
725
726static inline void *
727qla27xx_next_entry(void *p)
728{
729 struct qla27xx_fwdt_entry *ent = p;
730
731 return p + ent->hdr.entry_size;
732}
733
734static void
735qla27xx_walk_template(struct scsi_qla_host *vha,
736 struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
737{
738 struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
739 ulong count = tmp->entry_count;
740
741 ql_dbg(ql_dbg_misc, vha, 0xd01a,
742 "%s: entry count %lx\n", __func__, count);
743 while (count--) {
744 if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
745 break;
746 ent = qla27xx_next_entry(ent);
747 }
748 ql_dbg(ql_dbg_misc, vha, 0xd01b,
749 "%s: len=%lx\n", __func__, *len);
750}
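/*
 * Sketch of the variable-stride walk above: entries cannot be indexed as an
 * array, so the walker hops by hdr.entry_size until entry_count is exhausted
 * (or a handler such as t255 returns true). Minimal model, not driver code:
 */
#include <stdint.h>
#include <stdio.h>

struct hdr { uint32_t type, size; };	/* stand-in for ent->hdr */

int main(void)
{
	/* two fake entries, 8 and 12 bytes long, packed back to back */
	uint32_t blob[5] = { 1, 8, 2, 12, 0 };
	char *p = (char *)blob;

	for (unsigned long count = 2; count--; p += ((struct hdr *)p)->size)
		printf("entry type %u at offset %ld\n",
		       ((struct hdr *)p)->type, (long)(p - (char *)blob));
	return 0;
}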
751
752static void
753qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
754{
755 tmp->capture_timestamp = jiffies;
756}
757
758static void
759qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
760{
761 uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
762 int rval = 0;
763
764 rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
765 v+0, v+1, v+2, v+3, v+4, v+5);
766
767 tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
768 tmp->driver_info[1] = v[5] << 8 | v[4];
769 tmp->driver_info[2] = 0x12345678;
770}
771
772static void
773qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
774 struct scsi_qla_host *vha)
775{
776 tmp->firmware_version[0] = vha->hw->fw_major_version;
777 tmp->firmware_version[1] = vha->hw->fw_minor_version;
778 tmp->firmware_version[2] = vha->hw->fw_subminor_version;
779 tmp->firmware_version[3] =
780 vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
781 tmp->firmware_version[4] =
782 vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
783}
784
785static void
786ql27xx_edit_template(struct scsi_qla_host *vha,
787 struct qla27xx_fwdt_template *tmp)
788{
789 qla27xx_time_stamp(tmp);
790 qla27xx_driver_info(tmp);
791 qla27xx_firmware_info(tmp, vha);
792}
793
794static inline uint32_t
795qla27xx_template_checksum(void *p, ulong size)
796{
797 uint32_t *buf = p;
798 uint64_t sum = 0;
799
800 size /= sizeof(*buf);
801
802 while (size--)
803 sum += *buf++;
804
805 sum = (sum & 0xffffffff) + (sum >> 32);
806
807 return ~sum;
808}
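/*
 * Self-check of the checksum scheme above (stand-alone; the first three
 * values come from the default template header, the rest is hypothetical):
 * every dword is summed into 64 bits, the carries are folded back in once,
 * and the result is complemented -- so a template whose template_checksum
 * word was seeded as the complement of the sum of the remaining words
 * verifies to 0 (exact here, since the fold does not overflow).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t csum(const uint32_t *buf, unsigned long n)
{
	uint64_t sum = 0;

	while (n--)
		sum += *buf++;
	sum = (sum & 0xffffffff) + (sum >> 32);
	return (uint32_t)~sum;
}

int main(void)
{
	uint32_t tmpl[4] = { 0x63000000, 0xa4000000, 0x7c050000, 0 };

	tmpl[3] = csum(tmpl, 4);	/* seed the checksum word */
	assert(csum(tmpl, 4) == 0);	/* whole-template sum now verifies */
	return 0;
}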
809
810static inline int
811qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
812{
813 return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
814}
815
816static inline int
817qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
818{
819 return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
820}
821
822static void
823qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
824{
825 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
826 ulong len;
827
828 if (qla27xx_fwdt_template_valid(tmp)) {
829 len = tmp->template_size;
830 tmp = memcpy(vha->hw->fw_dump, tmp, len);
831 ql27xx_edit_template(vha, tmp);
832 qla27xx_walk_template(vha, tmp, tmp, &len);
833 vha->hw->fw_dump_len = len;
834 vha->hw->fw_dumped = 1;
835 }
836}
837
838ulong
839qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
840{
841 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
842 ulong len = 0;
843
844 if (qla27xx_fwdt_template_valid(tmp)) {
845 len = tmp->template_size;
846 qla27xx_walk_template(vha, tmp, NULL, &len);
847 }
848
849 return len;
850}
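// Hypothetical usage of the sizing entry point above (the allocation flow is
// an assumption for illustration, not shown in this diff): it is the
// buf == NULL pass of the two-pass walk, and its result is what the capture
// buffer gets sized from.
//
//	ulong need = qla27xx_fwdt_calculate_dump_size(vha);
//	vha->hw->fw_dump = vmalloc(need);	// allocate once, up front
//	...
//	qla27xx_fwdump(vha, 0);			// later: fills that buffer in place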
851
852ulong
853qla27xx_fwdt_template_size(void *p)
854{
855 struct qla27xx_fwdt_template *tmp = p;
856
857 return tmp->template_size;
858}
859
860ulong
861qla27xx_fwdt_template_default_size(void)
862{
863 return sizeof(ql27xx_fwdt_default_template);
864}
865
866const void *
867qla27xx_fwdt_template_default(void)
868{
869 return ql27xx_fwdt_default_template;
870}
871
872int
873qla27xx_fwdt_template_valid(void *p)
874{
875 struct qla27xx_fwdt_template *tmp = p;
876
877 if (!qla27xx_verify_template_header(tmp)) {
878 ql_log(ql_log_warn, NULL, 0xd01c,
879 "%s: template type %x\n", __func__, tmp->template_type);
880 return false;
881 }
882
883 if (!qla27xx_verify_template_checksum(tmp)) {
884 ql_log(ql_log_warn, NULL, 0xd01d,
885 "%s: failed template checksum\n", __func__);
886 return false;
887 }
888
889 return true;
890}
891
892void
893qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
894{
895 ulong flags = 0;
896
897 if (!hardware_locked)
898 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
899
900 if (!vha->hw->fw_dump)
901 ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
902 else if (!vha->hw->fw_dump_template)
903 ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
904 else
905 qla27xx_execute_fwdt_template(vha);
906
907 if (!hardware_locked)
908 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
909}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 000000000000..c9d2fff4d964
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,205 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8#ifndef __QLA_DMP27_H__
9#define __QLA_DMP27_H__
10
11#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
12
13struct __packed qla27xx_fwdt_template {
14 uint32_t template_type;
15 uint32_t entry_offset;
16 uint32_t template_size;
17 uint32_t reserved_1;
18
19 uint32_t entry_count;
20 uint32_t template_version;
21 uint32_t capture_timestamp;
22 uint32_t template_checksum;
23
24 uint32_t reserved_2;
25 uint32_t driver_info[3];
26
27 uint32_t saved_state[16];
28
29 uint32_t reserved_3[8];
30 uint32_t firmware_version[5];
31};
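/*
 * Build-time check of the header size (stand-alone mirror of the struct
 * above; all members are uint32_t, so packed and natural layout agree):
 * 41 dwords = 164 bytes.
 */
#include <assert.h>
#include <stdint.h>

struct fwdt_template {
	uint32_t template_type, entry_offset, template_size, reserved_1;
	uint32_t entry_count, template_version, capture_timestamp,
		 template_checksum;
	uint32_t reserved_2, driver_info[3];
	uint32_t saved_state[16];
	uint32_t reserved_3[8], firmware_version[5];
};

static_assert(sizeof(struct fwdt_template) == 164, "41 dwords");

int main(void) { return 0; }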
32
33#define TEMPLATE_TYPE_FWDUMP 99
34
35#define ENTRY_TYPE_NOP 0
36#define ENTRY_TYPE_TMP_END 255
37#define ENTRY_TYPE_RD_IOB_T1 256
38#define ENTRY_TYPE_WR_IOB_T1 257
39#define ENTRY_TYPE_RD_IOB_T2 258
40#define ENTRY_TYPE_WR_IOB_T2 259
41#define ENTRY_TYPE_RD_PCI 260
42#define ENTRY_TYPE_WR_PCI 261
43#define ENTRY_TYPE_RD_RAM 262
44#define ENTRY_TYPE_GET_QUEUE 263
45#define ENTRY_TYPE_GET_FCE 264
46#define ENTRY_TYPE_PSE_RISC 265
47#define ENTRY_TYPE_RST_RISC 266
48#define ENTRY_TYPE_DIS_INTR 267
49#define ENTRY_TYPE_GET_HBUF 268
50#define ENTRY_TYPE_SCRATCH 269
51#define ENTRY_TYPE_RDREMREG 270
52#define ENTRY_TYPE_WRREMREG 271
53#define ENTRY_TYPE_RDREMRAM 272
54#define ENTRY_TYPE_PCICFG 273
55
56#define CAPTURE_FLAG_PHYS_ONLY BIT_0
57#define CAPTURE_FLAG_PHYS_VIRT BIT_1
58
59#define DRIVER_FLAG_SKIP_ENTRY BIT_7
60
61struct __packed qla27xx_fwdt_entry {
62 struct __packed {
63 uint32_t entry_type;
64 uint32_t entry_size;
65 uint32_t reserved_1;
66
67 uint8_t capture_flags;
68 uint8_t reserved_2[2];
69 uint8_t driver_flags;
70 } hdr;
71 union __packed {
72 struct __packed {
73 } t0;
74
75 struct __packed {
76 } t255;
77
78 struct __packed {
79 uint32_t base_addr;
80 uint8_t reg_width;
81 uint16_t reg_count;
82 uint8_t pci_offset;
83 } t256;
84
85 struct __packed {
86 uint32_t base_addr;
87 uint32_t write_data;
88 uint8_t pci_offset;
89 uint8_t reserved[3];
90 } t257;
91
92 struct __packed {
93 uint32_t base_addr;
94 uint8_t reg_width;
95 uint16_t reg_count;
96 uint8_t pci_offset;
97 uint8_t banksel_offset;
98 uint8_t reserved[3];
99 uint32_t bank;
100 } t258;
101
102 struct __packed {
103 uint32_t base_addr;
104 uint32_t write_data;
105 uint8_t reserved[2];
106 uint8_t pci_offset;
107 uint8_t banksel_offset;
108 uint32_t bank;
109 } t259;
110
111 struct __packed {
112 uint8_t pci_addr;
113 uint8_t reserved[3];
114 } t260;
115
116 struct __packed {
117 uint8_t pci_addr;
118 uint8_t reserved[3];
119 uint32_t write_data;
120 } t261;
121
122 struct __packed {
123 uint8_t ram_area;
124 uint8_t reserved[3];
125 uint32_t start_addr;
126 uint32_t end_addr;
127 } t262;
128
129 struct __packed {
130 uint32_t num_queues;
131 uint8_t queue_type;
132 uint8_t reserved[3];
133 } t263;
134
135 struct __packed {
136 uint32_t fce_trace_size;
137 uint64_t write_pointer;
138 uint64_t base_pointer;
139 uint32_t fce_enable_mb0;
140 uint32_t fce_enable_mb2;
141 uint32_t fce_enable_mb3;
142 uint32_t fce_enable_mb4;
143 uint32_t fce_enable_mb5;
144 uint32_t fce_enable_mb6;
145 } t264;
146
147 struct __packed {
148 } t265;
149
150 struct __packed {
151 } t266;
152
153 struct __packed {
154 uint8_t pci_offset;
155 uint8_t reserved[3];
156 uint32_t data;
157 } t267;
158
159 struct __packed {
160 uint8_t buf_type;
161 uint8_t reserved[3];
162 uint32_t buf_size;
163 uint64_t start_addr;
164 } t268;
165
166 struct __packed {
167 uint32_t scratch_size;
168 } t269;
169
170 struct __packed {
171 uint32_t addr;
172 uint32_t count;
173 } t270;
174
175 struct __packed {
176 uint32_t addr;
177 uint32_t data;
178 } t271;
179
180 struct __packed {
181 uint32_t addr;
182 uint32_t count;
183 } t272;
184
185 struct __packed {
186 uint32_t addr;
187 uint32_t count;
188 } t273;
189 };
190};
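/*
 * Note on the union above: every variant overlays the same 16-byte header
 * (4 + 4 + 4 + 1 + 2 + 1), but the stride through a template comes from
 * hdr.entry_size, never sizeof() -- firmware may ship entries larger than
 * any variant this driver knows. Stand-alone size check (GCC/Clang packed
 * syntax used in place of the kernel's __packed):
 */
#include <assert.h>
#include <stdint.h>

struct __attribute__((packed)) fwdt_hdr {
	uint32_t entry_type, entry_size, reserved_1;
	uint8_t  capture_flags, reserved_2[2], driver_flags;
};

static_assert(sizeof(struct fwdt_hdr) == 16, "entry header is 16 bytes");

int main(void) { return 0; }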
191
192#define T262_RAM_AREA_CRITICAL_RAM 1
193#define T262_RAM_AREA_EXTERNAL_RAM 2
194#define T262_RAM_AREA_SHARED_RAM 3
195#define T262_RAM_AREA_DDR_RAM 4
196
197#define T263_QUEUE_TYPE_REQ 1
198#define T263_QUEUE_TYPE_RSP 2
199#define T263_QUEUE_TYPE_ATIO 3
200
201#define T268_BUF_TYPE_EXTD_TRACE 1
202#define T268_BUF_TYPE_EXCH_BUFOFF 2
203#define T268_BUF_TYPE_EXTD_LOGIN 3
204
205#endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 31d19535b015..e36b94712544 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.06.00.12-k" 10#define QLA2XXX_VERSION "8.07.00.02-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 6 13#define QLA_DRIVER_MINOR_VER 7
14#define QLA_DRIVER_PATCH_VER 0 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 919284834ad7..2eba35365920 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1304,12 +1304,24 @@ static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
1304static int qla4_83xx_restart(struct scsi_qla_host *ha) 1304static int qla4_83xx_restart(struct scsi_qla_host *ha)
1305{ 1305{
1306 int ret_val = QLA_SUCCESS; 1306 int ret_val = QLA_SUCCESS;
1307 uint32_t idc_ctrl;
1307 1308
1308 qla4_83xx_process_stop_seq(ha); 1309 qla4_83xx_process_stop_seq(ha);
1309 1310
1310 /* Collect minidump*/ 1311 /*
1311 if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags)) 1312 * Collect minidump.
 1313	 * If IDC_CTRL BIT1 is set, clear it when going to the INIT state and
 1314	 * don't collect a minidump
1315 */
1316 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
1317 if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1318 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
1319 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1320 ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
1321 __func__);
1322 } else {
1312 qla4_8xxx_get_minidump(ha); 1323 qla4_8xxx_get_minidump(ha);
1324 }
1313 1325
1314 qla4_83xx_process_init_seq(ha); 1326 qla4_83xx_process_init_seq(ha);
1315 1327
@@ -1664,3 +1676,23 @@ void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1664 __qla4_83xx_disable_pause(ha); 1676 __qla4_83xx_disable_pause(ha);
1665 ha->isp_ops->idc_unlock(ha); 1677 ha->isp_ops->idc_unlock(ha);
1666} 1678}
1679
1680/**
1681 * qla4_83xx_is_detached - Check if we are marked invisible.
1682 * @ha: Pointer to host adapter structure.
1683 **/
1684int qla4_83xx_is_detached(struct scsi_qla_host *ha)
1685{
1686 uint32_t drv_active;
1687
1688 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1689
1690 if (test_bit(AF_INIT_DONE, &ha->flags) &&
1691 !(drv_active & (1 << ha->func_num))) {
1692 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
1693 __func__, drv_active));
1694 return QLA_SUCCESS;
1695 }
1696
1697 return QLA_ERROR;
1698}
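/*
 * The detached test above reduces to one bitmap probe: DRV_ACTIVE holds one
 * bit per PCI function, and a function that finished init but whose bit is
 * clear has been marked invisible. Stand-alone sketch (names illustrative):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_detached(uint32_t drv_active, unsigned int func_num,
			bool init_done)
{
	return init_done && !(drv_active & (1u << func_num));
}

int main(void)
{
	printf("%d\n", is_detached(0x5, 1, true));	/* bit 1 clear -> 1 */
	return 0;
}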
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 04a0027dbca0..9f92cbf96477 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -517,7 +517,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
517 (ha->idc_extend_tmo * HZ))) { 517 (ha->idc_extend_tmo * HZ))) {
518 ha->notify_idc_comp = 0; 518 ha->notify_idc_comp = 0;
519 ha->notify_link_up_comp = 0; 519 ha->notify_link_up_comp = 0;
520 ql4_printk(KERN_WARNING, ha, "%s: IDC Complete notification not received", 520 ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
521 __func__); 521 __func__);
522 status = QLA_ERROR; 522 status = QLA_ERROR;
523 goto exit_wait; 523 goto exit_wait;
@@ -538,7 +538,7 @@ static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
538 if (!wait_for_completion_timeout(&ha->link_up_comp, 538 if (!wait_for_completion_timeout(&ha->link_up_comp,
539 (IDC_COMP_TOV * HZ))) { 539 (IDC_COMP_TOV * HZ))) {
540 ha->notify_link_up_comp = 0; 540 ha->notify_link_up_comp = 0;
541 ql4_printk(KERN_WARNING, ha, "%s: LINK UP notification not received", 541 ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
542 __func__); 542 __func__);
543 status = QLA_ERROR; 543 status = QLA_ERROR;
544 goto exit_wait; 544 goto exit_wait;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aa67bb9a4426..73a502288bde 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -194,7 +194,7 @@
194#define ADAPTER_INIT_TOV 30 194#define ADAPTER_INIT_TOV 30
195#define ADAPTER_RESET_TOV 180 195#define ADAPTER_RESET_TOV 180
196#define EXTEND_CMD_TOV 60 196#define EXTEND_CMD_TOV 60
197#define WAIT_CMD_TOV 30 197#define WAIT_CMD_TOV 5
198#define EH_WAIT_CMD_TOV 120 198#define EH_WAIT_CMD_TOV 120
199#define FIRMWARE_UP_TOV 60 199#define FIRMWARE_UP_TOV 60
200#define RESET_FIRMWARE_TOV 30 200#define RESET_FIRMWARE_TOV 30
@@ -297,6 +297,8 @@ struct ddb_entry {
297 297
298 /* Driver Re-login */ 298 /* Driver Re-login */
299 unsigned long flags; /* DDB Flags */ 299 unsigned long flags; /* DDB Flags */
300#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */
301
300 uint16_t default_relogin_timeout; /* Max time to wait for 302 uint16_t default_relogin_timeout; /* Max time to wait for
301 * relogin to complete */ 303 * relogin to complete */
302 atomic_t retry_relogin_timer; /* Min Time between relogins 304 atomic_t retry_relogin_timer; /* Min Time between relogins
@@ -580,7 +582,6 @@ struct scsi_qla_host {
580#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ 582#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
581#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */ 583#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
582#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ 584#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
583#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
584#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */ 585#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
585#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */ 586#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
586 587
@@ -595,10 +596,10 @@ struct scsi_qla_host {
595#define DPC_AEN 9 /* 0x00000200 */ 596#define DPC_AEN 9 /* 0x00000200 */
596#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ 597#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
597#define DPC_LINK_CHANGED 18 /* 0x00040000 */ 598#define DPC_LINK_CHANGED 18 /* 0x00040000 */
598#define DPC_RESET_ACTIVE 20 /* 0x00040000 */ 599#define DPC_RESET_ACTIVE 20 /* 0x00100000 */
599#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ 600#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/
600#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ 601#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/
601#define DPC_POST_IDC_ACK 23 /* 0x00200000 */ 602#define DPC_POST_IDC_ACK 23 /* 0x00800000 */
602#define DPC_RESTORE_ACB 24 /* 0x01000000 */ 603#define DPC_RESTORE_ACB 24 /* 0x01000000 */
603 604
604 struct Scsi_Host *host; /* pointer to host data */ 605 struct Scsi_Host *host; /* pointer to host data */
@@ -768,6 +769,7 @@ struct scsi_qla_host {
768 uint32_t fw_dump_capture_mask; 769 uint32_t fw_dump_capture_mask;
769 void *fw_dump_tmplt_hdr; 770 void *fw_dump_tmplt_hdr;
770 uint32_t fw_dump_tmplt_size; 771 uint32_t fw_dump_tmplt_size;
772 uint32_t fw_dump_skip_size;
771 773
772 struct completion mbx_intr_comp; 774 struct completion mbx_intr_comp;
773 775
@@ -910,7 +912,8 @@ static inline int is_qla80XX(struct scsi_qla_host *ha)
910static inline int is_aer_supported(struct scsi_qla_host *ha) 912static inline int is_aer_supported(struct scsi_qla_host *ha)
911{ 913{
912 return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) || 914 return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
913 (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324)); 915 (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
916 (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
914} 917}
915 918
916static inline int adapter_up(struct scsi_qla_host *ha) 919static inline int adapter_up(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 8d4092b33c07..209853ce0bbc 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -390,6 +390,7 @@ struct qla_flt_region {
390#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 390#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
391#define MBOX_CMD_CONN_OPEN 0x0074 391#define MBOX_CMD_CONN_OPEN 0x0074
392#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 392#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
393#define DDB_NOT_LOGGED_IN 0x09
393#define LOGOUT_OPTION_CLOSE_SESSION 0x0002 394#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
394#define LOGOUT_OPTION_RELOGIN 0x0004 395#define LOGOUT_OPTION_RELOGIN 0x0004
395#define LOGOUT_OPTION_FREE_DDB 0x0008 396#define LOGOUT_OPTION_FREE_DDB 0x0008
@@ -505,9 +506,9 @@ struct qla_flt_region {
505#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028 506#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
506#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029 507#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
507#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A 508#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
508#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B 509#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B
509#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 510#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C
510#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 511#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D
511#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 512#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
512#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031 513#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
513#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036 514#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
@@ -528,14 +529,14 @@ struct qla_flt_region {
528#define ACB_CONFIG_DISABLE 0x00 529#define ACB_CONFIG_DISABLE 0x00
529#define ACB_CONFIG_SET 0x01 530#define ACB_CONFIG_SET 0x01
530 531
531/* ACB State Defines */ 532/* ACB/IP Address State Defines */
532#define ACB_STATE_UNCONFIGURED 0x00 533#define IP_ADDRSTATE_UNCONFIGURED 0
533#define ACB_STATE_INVALID 0x01 534#define IP_ADDRSTATE_INVALID 1
534#define ACB_STATE_ACQUIRING 0x02 535#define IP_ADDRSTATE_ACQUIRING 2
535#define ACB_STATE_TENTATIVE 0x03 536#define IP_ADDRSTATE_TENTATIVE 3
536#define ACB_STATE_DEPRICATED 0x04 537#define IP_ADDRSTATE_DEPRICATED 4
537#define ACB_STATE_VALID 0x05 538#define IP_ADDRSTATE_PREFERRED 5
538#define ACB_STATE_DISABLING 0x06 539#define IP_ADDRSTATE_DISABLING 6
539 540
540/* FLASH offsets */ 541/* FLASH offsets */
541#define FLASH_SEGMENT_IFCB 0x04000000 542#define FLASH_SEGMENT_IFCB 0x04000000
@@ -698,14 +699,6 @@ struct addr_ctrl_blk {
698 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */ 699 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
699 uint8_t ipv6_addr0_state; /* 223 */ 700 uint8_t ipv6_addr0_state; /* 223 */
700 uint8_t ipv6_addr1_state; /* 224 */ 701 uint8_t ipv6_addr1_state; /* 224 */
701#define IP_ADDRSTATE_UNCONFIGURED 0
702#define IP_ADDRSTATE_INVALID 1
703#define IP_ADDRSTATE_ACQUIRING 2
704#define IP_ADDRSTATE_TENTATIVE 3
705#define IP_ADDRSTATE_DEPRICATED 4
706#define IP_ADDRSTATE_PREFERRED 5
707#define IP_ADDRSTATE_DISABLING 6
708
709 uint8_t ipv6_dflt_rtr_state; /* 225 */ 702 uint8_t ipv6_dflt_rtr_state; /* 225 */
710#define IPV6_RTRSTATE_UNKNOWN 0 703#define IPV6_RTRSTATE_UNKNOWN 0
711#define IPV6_RTRSTATE_MANUAL 1 704#define IPV6_RTRSTATE_MANUAL 1
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d67c50e0b896..b1a19cd8d5b2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -279,6 +279,8 @@ int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
279uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); 279uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
280int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); 280int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
281int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); 281int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
282int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
283int qla4_83xx_is_detached(struct scsi_qla_host *ha);
282 284
283extern int ql4xextended_error_logging; 285extern int ql4xextended_error_logging;
284extern int ql4xdontresethba; 286extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 7456eeb2e58a..28fbece7e08f 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -959,13 +959,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
959 qla4xxx_build_ddb_list(ha, is_reset); 959 qla4xxx_build_ddb_list(ha, is_reset);
960 960
961 set_bit(AF_ONLINE, &ha->flags); 961 set_bit(AF_ONLINE, &ha->flags);
962exit_init_hba:
963 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
964 /* Since interrupts are registered in start_firmware for
965 * 80XX, release them here if initialize_adapter fails */
966 qla4xxx_free_irqs(ha);
967 }
968 962
963exit_init_hba:
969 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, 964 DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
970 status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); 965 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
971 return status; 966 return status;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a3c8bc7706c2..b1925d195f41 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
385 385
386 cls_conn = ddb_entry->conn; 386 cls_conn = ddb_entry->conn;
387 conn = cls_conn->dd_data; 387 conn = cls_conn->dd_data;
388 spin_lock(&conn->session->lock); 388 spin_lock(&conn->session->back_lock);
389 task = iscsi_itt_to_task(conn, itt); 389 task = iscsi_itt_to_task(conn, itt);
390 spin_unlock(&conn->session->lock); 390 spin_unlock(&conn->session->back_lock);
391 391
392 if (task == NULL) { 392 if (task == NULL) {
393 ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); 393 ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
@@ -635,6 +635,18 @@ static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
635 } 635 }
636} 636}
637 637
638static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
639 uint32_t *mbox_sts)
640{
641 memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
642 &mbox_sts[2], sizeof(uint32_t));
643 memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
644 &mbox_sts[3], sizeof(uint32_t));
645 memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
646 &mbox_sts[4], sizeof(uint32_t));
647 memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
648 &mbox_sts[5], sizeof(uint32_t));
649}
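/*
 * The four memcpy() calls above scatter mailbox words 2..5 -- the 128-bit
 * IPv6 default-router address, one 32-bit chunk per register -- into
 * s6_addr32[0..3]. Stand-alone equivalent as a loop:
 */
#include <stdint.h>
#include <string.h>

static void copy_router_addr(uint32_t *s6_addr32, const uint32_t *mbox_sts)
{
	int i;

	for (i = 0; i < 4; i++)		/* mbox_sts[2..5] -> s6_addr32[0..3] */
		memcpy(&s6_addr32[i], &mbox_sts[2 + i], sizeof(uint32_t));
}

int main(void)
{
	uint32_t addr[4], mbox[6] = { 0, 0, 1, 2, 3, 4 };

	copy_router_addr(addr, mbox);
	return addr[3] == 4 ? 0 : 1;	/* sanity: last chunk landed */
}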
638 650
639/** 651/**
640 * qla4xxx_isr_decode_mailbox - decodes mailbox status 652 * qla4xxx_isr_decode_mailbox - decodes mailbox status
@@ -781,27 +793,44 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
781 mbox_sts[3]); 793 mbox_sts[3]);
782 /* mbox_sts[2] = Old ACB state 794 /* mbox_sts[2] = Old ACB state
783 * mbox_sts[3] = new ACB state */ 795 * mbox_sts[3] = new ACB state */
784 if ((mbox_sts[3] == ACB_STATE_VALID) && 796 if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
785 ((mbox_sts[2] == ACB_STATE_TENTATIVE) || 797 ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
786 (mbox_sts[2] == ACB_STATE_ACQUIRING))) { 798 (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
787 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 799 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
788 } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && 800 } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
789 (mbox_sts[2] == ACB_STATE_VALID)) { 801 (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
790 if (is_qla80XX(ha)) 802 if (is_qla80XX(ha))
791 set_bit(DPC_RESET_HA_FW_CONTEXT, 803 set_bit(DPC_RESET_HA_FW_CONTEXT,
792 &ha->dpc_flags); 804 &ha->dpc_flags);
793 else 805 else
794 set_bit(DPC_RESET_HA, &ha->dpc_flags); 806 set_bit(DPC_RESET_HA, &ha->dpc_flags);
795 } else if (mbox_sts[3] == ACB_STATE_DISABLING) { 807 } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
796 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n", 808 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
797 ha->host_no, __func__); 809 ha->host_no, __func__);
798 } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) { 810 } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
799 complete(&ha->disable_acb_comp); 811 complete(&ha->disable_acb_comp);
800 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n", 812 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
801 ha->host_no, __func__); 813 ha->host_no, __func__);
802 } 814 }
803 break; 815 break;
804 816
817 case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
818 case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
819 case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
820 /* No action */
821 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
822 ha->host_no, mbox_status));
823 break;
824
825 case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
826 DEBUG2(ql4_printk(KERN_INFO, ha,
827 "scsi%ld: AEN %04x, IPv6 ERROR, "
828 "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
829 ha->host_no, mbox_sts[0], mbox_sts[1],
830 mbox_sts[2], mbox_sts[3], mbox_sts[4],
831 mbox_sts[5]));
832 break;
833
805 case MBOX_ASTS_MAC_ADDRESS_CHANGED: 834 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
806 case MBOX_ASTS_DNS: 835 case MBOX_ASTS_DNS:
807 /* No action */ 836 /* No action */
@@ -939,6 +968,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
939 DEBUG2(ql4_printk(KERN_INFO, ha, 968 DEBUG2(ql4_printk(KERN_INFO, ha,
940 "scsi%ld: AEN %04x Received IPv6 default router changed notification\n", 969 "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
941 ha->host_no, mbox_sts[0])); 970 ha->host_no, mbox_sts[0]));
971 qla4xxx_default_router_changed(ha, mbox_sts);
942 break; 972 break;
943 973
944 case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: 974 case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
@@ -1022,7 +1052,8 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
1022 uint32_t intr_status) 1052 uint32_t intr_status)
1023{ 1053{
1024 /* Process response queue interrupt. */ 1054 /* Process response queue interrupt. */
1025 if (intr_status & HSRX_RISC_IOCB_INT) 1055 if ((intr_status & HSRX_RISC_IOCB_INT) &&
1056 test_bit(AF_INIT_DONE, &ha->flags))
1026 qla4xxx_process_response_queue(ha); 1057 qla4xxx_process_response_queue(ha);
1027 1058
1028 /* Process mailbox/asynch event interrupt.*/ 1059 /* Process mailbox/asynch event interrupt.*/
@@ -1399,6 +1430,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1399{ 1430{
1400 struct scsi_qla_host *ha = dev_id; 1431 struct scsi_qla_host *ha = dev_id;
1401 unsigned long flags; 1432 unsigned long flags;
1433 int intr_status;
1402 uint32_t ival = 0; 1434 uint32_t ival = 0;
1403 1435
1404 spin_lock_irqsave(&ha->hardware_lock, flags); 1436 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1412,8 +1444,15 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1412 qla4xxx_process_response_queue(ha); 1444 qla4xxx_process_response_queue(ha);
1413 writel(0, &ha->qla4_83xx_reg->iocb_int_mask); 1445 writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
1414 } else { 1446 } else {
1415 qla4xxx_process_response_queue(ha); 1447 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1416 writel(0, &ha->qla4_82xx_reg->host_int); 1448 if (intr_status & HSRX_RISC_IOCB_INT) {
1449 qla4xxx_process_response_queue(ha);
1450 writel(0, &ha->qla4_82xx_reg->host_int);
1451 } else {
1452 ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
1453 __func__);
1454 goto exit_msix_rsp_q;
1455 }
1417 } 1456 }
1418 ha->isr_count++; 1457 ha->isr_count++;
1419exit_msix_rsp_q: 1458exit_msix_rsp_q:
@@ -1488,6 +1527,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
1488int qla4xxx_request_irqs(struct scsi_qla_host *ha) 1527int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1489{ 1528{
1490 int ret; 1529 int ret;
1530 int rval = QLA_ERROR;
1491 1531
1492 if (is_qla40XX(ha)) 1532 if (is_qla40XX(ha))
1493 goto try_intx; 1533 goto try_intx;
@@ -1568,9 +1608,10 @@ irq_attached:
1568 set_bit(AF_IRQ_ATTACHED, &ha->flags); 1608 set_bit(AF_IRQ_ATTACHED, &ha->flags);
1569 ha->host->irq = ha->pdev->irq; 1609 ha->host->irq = ha->pdev->irq;
1570 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", 1610 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1571 __func__, ha->pdev->irq); 1611 __func__, ha->pdev->irq);
1612 rval = QLA_SUCCESS;
1572irq_not_attached: 1613irq_not_attached:
1573 return ret; 1614 return rval;
1574} 1615}
1575 1616
1576void qla4xxx_free_irqs(struct scsi_qla_host *ha) 1617void qla4xxx_free_irqs(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 9ae8ca3b69f9..0a6b782d6fdb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -212,9 +212,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
212 ha->host_no, __func__)); 212 ha->host_no, __func__));
213 goto mbox_exit; 213 goto mbox_exit;
214 } 214 }
215 DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...," 215 ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
216 " Scheduling Adapter Reset\n", ha->host_no, 216 ha->host_no, mbx_cmd[0]);
217 mbx_cmd[0]));
218 ha->mailbox_timeout_count++; 217 ha->mailbox_timeout_count++;
219 mbx_sts[0] = (-1); 218 mbx_sts[0] = (-1);
220 set_bit(DPC_RESET_HA, &ha->dpc_flags); 219 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -251,15 +250,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
251 break; 250 break;
252 251
253 case MBOX_STS_BUSY: 252 case MBOX_STS_BUSY:
254 DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n", 253 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
255 ha->host_no, __func__, mbx_cmd[0])); 254 ha->host_no, __func__, mbx_cmd[0]);
256 ha->mailbox_timeout_count++; 255 ha->mailbox_timeout_count++;
257 break; 256 break;
258 257
259 default: 258 default:
260 DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, " 259 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
261 "sts = %08X ****\n", ha->host_no, __func__, 260 ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
262 mbx_cmd[0], mbx_sts[0])); 261 mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
262 mbx_sts[5], mbx_sts[6], mbx_sts[7]);
263 break; 263 break;
264 } 264 }
265 spin_unlock_irqrestore(&ha->hardware_lock, flags); 265 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -383,7 +383,6 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
383 mbox_cmd[2] = LSDW(init_fw_cb_dma); 383 mbox_cmd[2] = LSDW(init_fw_cb_dma);
384 mbox_cmd[3] = MSDW(init_fw_cb_dma); 384 mbox_cmd[3] = MSDW(init_fw_cb_dma);
385 mbox_cmd[4] = sizeof(struct addr_ctrl_blk); 385 mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
386 mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
387 386
388 if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) != 387 if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
389 QLA_SUCCESS) { 388 QLA_SUCCESS) {
@@ -648,9 +647,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
648 goto exit_init_fw_cb; 647 goto exit_init_fw_cb;
649 } 648 }
650 649
651 /* Initialize request and response queues. */
652 qla4xxx_init_rings(ha);
653
654 /* Fill in the request and response queue information. */ 650 /* Fill in the request and response queue information. */
655 init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); 651 init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
656 init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); 652 init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
@@ -1002,6 +998,10 @@ int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
1002 "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " 998 "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
1003 "failed sts %04X %04X", __func__, 999 "failed sts %04X %04X", __func__,
1004 mbox_sts[0], mbox_sts[1])); 1000 mbox_sts[0], mbox_sts[1]));
1001 if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
1002 (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
1003 set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
1004 }
1005 } 1005 }
1006 1006
1007 return status; 1007 return status;
@@ -1918,6 +1918,7 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
1918 mbox_sts[0], mbox_sts[1], mbox_sts[2])); 1918 mbox_sts[0], mbox_sts[1], mbox_sts[2]));
1919 } else { 1919 } else {
1920 if (is_qla8042(ha) && 1920 if (is_qla8042(ha) &&
1921 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
1921 (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) { 1922 (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
1922 /* 1923 /*
1923 * Disable ACB mailbox command takes time to complete 1924 * Disable ACB mailbox command takes time to complete
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d001202d3565..63328c812b70 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2383,6 +2383,11 @@ static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2383 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", 2383 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2384 ha->host_no, index, entry_hdr->entry_type, 2384 ha->host_no, index, entry_hdr->entry_type,
2385 entry_hdr->d_ctrl.entry_capture_mask)); 2385 entry_hdr->d_ctrl.entry_capture_mask));
2386 /* If driver encounters a new entry type that it cannot process,
2387 * it should just skip the entry and adjust the total buffer size by
 2388	 * subtracting the skipped bytes from it
2389 */
2390 ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
2386} 2391}
2387 2392
2388/* ISP83xx functions to process new minidump entries... */ 2393/* ISP83xx functions to process new minidump entries... */
@@ -2590,6 +2595,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2590 uint64_t now; 2595 uint64_t now;
2591 uint32_t timestamp; 2596 uint32_t timestamp;
2592 2597
2598 ha->fw_dump_skip_size = 0;
2593 if (!ha->fw_dump) { 2599 if (!ha->fw_dump) {
2594 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n", 2600 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
2595 __func__, ha->host_no); 2601 __func__, ha->host_no);
@@ -2761,7 +2767,7 @@ skip_nxt_entry:
 			entry_hdr->entry_size);
 	}
 
-	if (data_collected != ha->fw_dump_size) {
+	if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
 		ql4_printk(KERN_INFO, ha,
 			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
 			   data_collected, ha->fw_dump_size);
@@ -2820,63 +2826,35 @@ void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
 int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 {
 	int rval = QLA_ERROR;
-	int i, timeout;
-	uint32_t old_count, count, idc_ctrl;
-	int need_reset = 0, peg_stuck = 1;
+	int i;
+	uint32_t old_count, count;
+	int need_reset = 0;
 
 	need_reset = ha->isp_ops->need_reset(ha);
-	old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-
-	for (i = 0; i < 10; i++) {
-		timeout = msleep_interruptible(200);
-		if (timeout) {
-			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
-					    QLA8XXX_DEV_FAILED);
-			return rval;
-		}
-
-		count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-		if (count != old_count)
-			peg_stuck = 0;
-	}
 
 	if (need_reset) {
 		/* We are trying to perform a recovery here. */
-		if (peg_stuck)
+		if (test_bit(AF_FW_RECOVERY, &ha->flags))
 			ha->isp_ops->rom_lock_recovery(ha);
-		goto dev_initialize;
 	} else {
-		/* Start of day for this ha context. */
-		if (peg_stuck) {
-			/* Either we are the first or recovery in progress. */
-			ha->isp_ops->rom_lock_recovery(ha);
-			goto dev_initialize;
-		} else {
-			/* Firmware already running. */
-			rval = QLA_SUCCESS;
-			goto dev_ready;
-		}
+		old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+			count = qla4_8xxx_rd_direct(ha,
+						    QLA8XXX_PEG_ALIVE_COUNTER);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
+		}
+		ha->isp_ops->rom_lock_recovery(ha);
 	}
 
-dev_initialize:
 	/* set to DEV_INITIALIZING */
 	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
 	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
 			    QLA8XXX_DEV_INITIALIZING);
 
-	/*
-	 * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
-	 * reset it after device goes to INIT state.
-	 */
-	if (is_qla8032(ha) || is_qla8042(ha)) {
-		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
-		if (idc_ctrl & GRACEFUL_RESET_BIT1) {
-			qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
-					 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
-			set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
-		}
-	}
-
 	ha->isp_ops->idc_unlock(ha);
 
 	if (is_qla8022(ha))
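Editor's note: after this rework the peg-alive counter is only sampled on the start-of-day path, and a counter that advances across ten 200 ms samples is taken as proof the firmware is already up. A minimal userspace C sketch of the same progress-detection idiom (the register read is simulated):

#include <stdio.h>
#include <unistd.h>

static unsigned int counter;	/* stands in for QLA8XXX_PEG_ALIVE_COUNTER */

static unsigned int rd_alive(void)
{
	return counter += 3;	/* a live firmware keeps incrementing it */
}

int main(void)
{
	unsigned int old = rd_alive();

	for (int i = 0; i < 10; i++) {
		usleep(200 * 1000);		/* msleep(200) equivalent */
		if (rd_alive() != old) {
			puts("firmware already running");
			return 0;
		}
	}
	puts("peg stuck: run ROM lock recovery");
	return 1;
}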
@@ -3209,6 +3187,10 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
 
 	retval = qla4_8xxx_device_state_handler(ha);
 
+	/* Initialize request and response queues. */
+	if (retval == QLA_SUCCESS)
+		qla4xxx_init_rings(ha);
+
 	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
 		retval = qla4xxx_request_irqs(ha);
 
@@ -3836,3 +3818,24 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
 msix_out:
 	return ret;
 }
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+	int status = QLA_SUCCESS;
+
+	/* Don't retry adapter initialization if IRQ allocation failed */
+	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+			   __func__);
+		status = QLA_ERROR;
+		goto exit_init_adapter_failure;
+	}
+
+	/* Since interrupts are registered in start_firmware for
+	 * 8xxx, release them here if initialize_adapter fails
+	 * and retry adapter initialization */
+	qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+	return status;
+}
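Editor's note: the new helper encodes one retry rule: a failed initialization is only worth retrying if the IRQs were actually attached, and a retry must first release the IRQs so start_firmware can re-request them. A toy C model of that policy (the adapter and flag are simulated; QLA_* names are reused only for readability):

#include <stdio.h>

enum { QLA_SUCCESS, QLA_ERROR };

static int irq_attached;	/* stands in for AF_IRQ_ATTACHED */

static int initialize_adapter(int attempt)
{
	irq_attached = 1;		/* pretend request_irqs() worked */
	return attempt < 2 ? QLA_ERROR : QLA_SUCCESS;
}

/* Mirrors the new helper: retrying is pointless without IRQs, and a
 * retry must first free the IRQs so they can be re-requested. */
static int check_init_adapter_retry(void)
{
	if (!irq_attached)
		return QLA_ERROR;
	irq_attached = 0;		/* qla4xxx_free_irqs() equivalent */
	return QLA_SUCCESS;
}

int main(void)
{
	int attempt = 0;
	int status = initialize_adapter(attempt);

	while (status == QLA_ERROR && attempt < 5) {
		if (check_init_adapter_retry() == QLA_ERROR) {
			puts("giving up: IRQs never attached");
			return 1;
		}
		status = initialize_adapter(++attempt);
	}
	printf("adapter online after %d retries\n", attempt);
	return 0;
}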
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c21adc338cf1..459b9f7186fd 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1670,16 +1670,13 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	struct sockaddr_in *addr;
 	struct sockaddr_in6 *addr6;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	if (!shost) {
 		ret = -ENXIO;
-		printk(KERN_ERR "%s: shost is NULL\n",
-		       __func__);
+		pr_err("%s: shost is NULL\n", __func__);
 		return ERR_PTR(ret);
 	}
 
 	ha = iscsi_host_priv(shost);
-
 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
 	if (!ep) {
 		ret = -ENOMEM;
@@ -1699,6 +1696,9 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
 				  (char *)&addr6->sin6_addr));
+	} else {
+		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+			   __func__);
 	}
 
 	qla_ep->host = shost;
@@ -1712,9 +1712,9 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 	struct scsi_qla_host *ha;
 	int ret = 0;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	qla_ep = ep->dd_data;
 	ha = to_qla_host(qla_ep->host);
+	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
 
 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
 		ret = 1;
@@ -1724,7 +1724,13 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 
 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
 {
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	struct qla_endpoint *qla_ep;
+	struct scsi_qla_host *ha;
+
+	qla_ep = ep->dd_data;
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 	iscsi_destroy_endpoint(ep);
 }
 
@@ -1734,8 +1740,11 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
 {
 	struct qla_endpoint *qla_ep = ep->dd_data;
 	struct sockaddr *dst_addr;
+	struct scsi_qla_host *ha;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
@@ -1766,13 +1775,13 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
 	int ret;
 	dma_addr_t iscsi_stats_dma;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
-
 	cls_sess = iscsi_conn_to_session(cls_conn);
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
 
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
 	/* Allocate memory */
 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
@@ -2100,7 +2109,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
 				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
 		else
 			init_fw_cb->ipv6_tcp_opts &=
-				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
 		break;
 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
 		if (iface_param->iface_num & 0x1)
@@ -2297,7 +2307,8 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
 				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
 		else
 			init_fw_cb->ipv4_tcp_opts &=
-				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE);
+				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
 		break;
 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
 		if (iface_param->iface_num & 0x1)
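Editor's note: both of these hunks fix the same C subtlety. `~TCPOPT_DELAYED_ACK_DISABLE` is computed after promotion to int, so the complement carries sixteen extra set bits that get flagged (e.g. by sparse) when the value is narrowed into a 16-bit little-endian field; masking with 0xFFFF keeps the constant in 16-bit range without changing the stored bits. A runnable demonstration:

#include <stdio.h>

#define TCPOPT_DELAYED_ACK_DISABLE 0x0200

int main(void)
{
	/* ~ promotes its operand to int, so the complement is wider
	 * than the 16-bit field it will eventually be stored into. */
	printf("~x          = 0x%x\n",
	       (unsigned)~TCPOPT_DELAYED_ACK_DISABLE);	/* 0xfffffdff */
	printf("~x & 0xFFFF = 0x%x\n",
	       (unsigned)(~TCPOPT_DELAYED_ACK_DISABLE & 0xFFFF));	/* 0xfdff */
	return 0;
}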
@@ -3045,7 +3056,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
 	struct sockaddr *dst_addr;
 	int ret;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	if (!ep) {
 		printk(KERN_ERR "qla4xxx: missing ep.\n");
 		return NULL;
@@ -3054,6 +3064,8 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
 	qla_ep = ep->dd_data;
 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
 	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
 	if (ret == QLA_ERROR)
@@ -3074,6 +3086,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
 	ddb_entry->sess = cls_sess;
 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
 	ddb_entry->ddb_change = qla4xxx_ddb_change;
+	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
 	ha->tot_ddbs++;
@@ -3092,10 +3105,11 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
 	uint32_t ddb_state;
 	int ret;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
 					  &fw_ddb_entry_dma, GFP_KERNEL);
@@ -3123,7 +3137,8 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
 
 destroy_session:
 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
-
+	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qla4xxx_free_ddb(ha, ddb_entry);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3141,17 +3156,23 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
 				    conn_idx);
-	if (!cls_conn)
+	if (!cls_conn) {
+		pr_info("%s: Can not create connection for conn_idx = %u\n",
+			__func__, conn_idx);
 		return NULL;
+	}
 
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ddb_entry->conn = cls_conn;
 
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+			  conn_idx));
 	return cls_conn;
 }
 
@@ -3162,8 +3183,16 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
 	struct iscsi_conn *conn;
 	struct qla_conn *qla_conn;
 	struct iscsi_endpoint *ep;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct iscsi_session *sess;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_session->sid, cls_conn->cid));
 
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
@@ -3186,10 +3215,11 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
 	int ret = 0;
 	int status = QLA_SUCCESS;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_sess->sid, cls_conn->cid));
 
 	/* Check if we have matching FW DDB, if yes then do not
 	 * login to this target. This could cause target to logout previous
@@ -3263,10 +3293,11 @@ static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	struct ddb_entry *ddb_entry;
 	int options;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+			  cls_conn->cid));
 
 	options = LOGOUT_OPTION_CLOSE_SESSION;
 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
@@ -4372,6 +4403,11 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 	uint32_t dev_state;
 	uint32_t idc_ctrl;
 
+	if (is_qla8032(ha) &&
+	    (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+		WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+			  __func__, ha->func_num);
+
 	/* don't poll if reset is going on */
 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
 	      test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -4554,11 +4590,19 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
 	uint32_t index = 0;
 	unsigned long flags;
 	struct scsi_cmnd *cmd;
+	unsigned long wtime;
+	uint32_t wtmo;
+
+	if (is_qla40XX(ha))
+		wtmo = WAIT_CMD_TOV;
+	else
+		wtmo = ha->nx_reset_timeout / 2;
 
-	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
+	wtime = jiffies + (wtmo * HZ);
 
-	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
-			  "complete\n", WAIT_CMD_TOV));
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Wait up to %u seconds for cmds to complete\n",
+			  wtmo));
 
 	while (!time_after_eq(jiffies, wtime)) {
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -4861,11 +4905,11 @@ chip_reset:
 			qla4xxx_cmd_wait(ha);
 
 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 		DEBUG2(ql4_printk(KERN_INFO, ha,
 				  "scsi%ld: %s - Performing chip reset..\n",
 				  ha->host_no, __func__));
 		status = ha->isp_ops->reset_chip(ha);
+		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 	}
 
 	/* Flush any pending ddb changed AENs */
@@ -4881,8 +4925,21 @@ recover_ha_init_adapter:
 		ssleep(6);
 
 		/* NOTE: AF_ONLINE flag set upon successful completion of
 		 * qla4xxx_initialize_adapter */
 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			status = qla4_8xxx_check_init_adapter_retry(ha);
+			if (status == QLA_ERROR) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
+					   ha->host_no, __func__);
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+					  &ha->dpc_flags);
+				goto exit_recover;
+			}
+		}
 	}
 
 	/* Retry failed adapter initialization, if necessary
@@ -5228,9 +5285,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
 		container_of(work, struct scsi_qla_host, dpc_work);
 	int status = QLA_ERROR;
 
-	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
-		      "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
-		      ha->host_no, __func__, ha->flags, ha->dpc_flags))
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+			  ha->host_no, __func__, ha->flags, ha->dpc_flags));
 
 	/* Initialization not yet finished. Don't do anything yet. */
 	if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -8681,11 +8738,8 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
 
 	/* Don't retry adapter initialization if IRQ allocation failed */
-	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
-		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
-			   __func__);
+	if (is_qla80XX(ha) && (status == QLA_ERROR))
 		goto skip_retry_init;
-	}
 
 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
 	       init_retry_count++ < MAX_INIT_RETRIES) {
@@ -8709,6 +8763,10 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
 			continue;
 
 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+				goto skip_retry_init;
+		}
 	}
 
 skip_retry_init:
@@ -8857,10 +8915,56 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
 	}
 }
 
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+				struct ddb_entry *ddb_entry)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int options;
+	int status;
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+		goto clear_ddb;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto clear_ddb;
+	}
+
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+						 fw_ddb_entry, fw_ddb_entry_dma,
+						 NULL, NULL, &ddb_state, NULL,
+						 NULL, NULL);
+		if (status == QLA_ERROR)
+			goto free_ddb;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto free_ddb;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while ((time_after(wtime, jiffies)));
+
+free_ddb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
 {
 	struct ddb_entry *ddb_entry;
-	int options;
 	int idx;
 
 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
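Editor's note: qla4xxx_destroy_ddb turns the old fire-and-forget logout into a bounded wait. It polls the firmware DDB state until the session reports logged out or LOGOUT_TOV expires, and clears the DDB either way. The deadline-polling shape, as a standalone C sketch with a simulated firmware query:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

enum { ACTIVE, NO_CONNECTION_ACTIVE };

static int query_state(void)
{
	static int polls;

	return ++polls < 3 ? ACTIVE : NO_CONNECTION_ACTIVE;
}

int main(void)
{
	/* Same shape as the new wait loop: poll once per second until
	 * logout completes, or give up after a fixed TOV. */
	time_t deadline = time(NULL) + 5;	/* stands in for LOGOUT_TOV */

	do {
		if (query_state() == NO_CONNECTION_ACTIVE) {
			puts("logged out, safe to clear DDB");
			return 0;
		}
		sleep(1);
	} while (time(NULL) < deadline);

	puts("timed out, clearing DDB anyway");
	return 0;
}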
@@ -8869,13 +8973,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
 		if ((ddb_entry != NULL) &&
 		    (ddb_entry->ddb_type == FLASH_DDB)) {
 
-			options = LOGOUT_OPTION_CLOSE_SESSION;
-			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
-			    == QLA_ERROR)
-				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
-					   __func__);
-
-			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+			qla4xxx_destroy_ddb(ha, ddb_entry);
 			/*
 			 * we have decremented the reference count of the driver
 			 * when we setup the session to have the driver unload
@@ -9136,14 +9234,15 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
 	int ret = SUCCESS;
 	int wait = 0;
 
-	ql4_printk(KERN_INFO, ha,
-		   "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
-		   ha->host_no, id, lun, cmd);
+	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n",
+		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	srb = (struct srb *) CMD_SP(cmd);
 	if (!srb) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n",
+			   ha->host_no, id, lun);
 		return SUCCESS;
 	}
 	kref_get(&srb->srb_ref);
@@ -9560,28 +9659,36 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 	}
 
 	fn = PCI_FUNC(ha->pdev->devfn);
-	while (fn > 0) {
-		fn--;
-		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
-			   "func %x\n", ha->host_no, __func__, fn);
-		/* Get the pci device given the domain, bus,
-		 * slot/function number */
-		other_pdev =
-		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
-		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
-		    fn));
-
-		if (!other_pdev)
-			continue;
-
-		if (atomic_read(&other_pdev->enable_cnt)) {
-			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
-				   "func in enabled state%x\n", ha->host_no,
-				   __func__, fn);
+	if (is_qla8022(ha)) {
+		while (fn > 0) {
+			fn--;
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+				   ha->host_no, __func__, fn);
+			/* Get the pci device given the domain, bus,
+			 * slot/function number */
+			other_pdev = pci_get_domain_bus_and_slot(
+					pci_domain_nr(ha->pdev->bus),
+					ha->pdev->bus->number,
+					PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+					fn));
+
+			if (!other_pdev)
+				continue;
+
+			if (atomic_read(&other_pdev->enable_cnt)) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
+					   ha->host_no, __func__, fn);
+				pci_dev_put(other_pdev);
+				break;
+			}
 			pci_dev_put(other_pdev);
-			break;
 		}
-		pci_dev_put(other_pdev);
+	} else {
+		/* this case is meant for ISP83xx/ISP84xx only */
+		if (qla4_83xx_can_perform_reset(ha)) {
+			/* reset fn as iSCSI is going to perform the reset */
+			fn = 0;
+		}
 	}
 
 	/* The first function on the card, the reset owner will
@@ -9615,6 +9722,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 		if (rval != QLA_SUCCESS) {
 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
 				   "FAILED\n", ha->host_no, __func__);
+			qla4xxx_free_irqs(ha);
 			ha->isp_ops->idc_lock(ha);
 			qla4_8xxx_clear_drv_active(ha);
 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
@@ -9642,6 +9750,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
 		if (rval == QLA_SUCCESS)
 			ha->isp_ops->enable_intrs(ha);
+		else
+			qla4xxx_free_irqs(ha);
 
 		ha->isp_ops->idc_lock(ha);
 		qla4_8xxx_set_drv_active(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9b2946658683..c6ba0a6b8458 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.04.00-k3"
+#define QLA4XXX_DRIVER_VERSION	"5.04.00-k4"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d8afec8317cf..c4d632c27a3e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -161,47 +161,20 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 /**
- * scsi_pool_alloc_command - internal function to get a fully allocated command
- * @pool:	slab pool to allocate the command from
- * @gfp_mask:	mask for the allocation
- *
- * Returns a fully allocated command (with the allied sense buffer) or
- * NULL on failure
- */
-static struct scsi_cmnd *
-scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
-{
-	struct scsi_cmnd *cmd;
-
-	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
-	if (!cmd)
-		return NULL;
-
-	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
-					     gfp_mask | pool->gfp_mask);
-	if (!cmd->sense_buffer) {
-		kmem_cache_free(pool->cmd_slab, cmd);
-		return NULL;
-	}
-
-	return cmd;
-}
-
-/**
- * scsi_pool_free_command - internal function to release a command
- * @pool:	slab pool to allocate the command from
+ * scsi_host_free_command - internal function to release a command
+ * @shost:	host to free the command for
  * @cmd:	command to release
  *
  * the command must previously have been allocated by
- * scsi_pool_alloc_command.
+ * scsi_host_alloc_command.
  */
 static void
-scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
-		       struct scsi_cmnd *cmd)
+scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
+	struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+
 	if (cmd->prot_sdb)
 		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-
 	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 	kmem_cache_free(pool->cmd_slab, cmd);
 }
@@ -217,22 +190,32 @@ scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
 static struct scsi_cmnd *
 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
+	struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 	struct scsi_cmnd *cmd;
 
-	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
 	if (!cmd)
-		return NULL;
+		goto fail;
+
+	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+					     gfp_mask | pool->gfp_mask);
+	if (!cmd->sense_buffer)
+		goto fail_free_cmd;
 
 	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
 		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
-
-		if (!cmd->prot_sdb) {
-			scsi_pool_free_command(shost->cmd_pool, cmd);
-			return NULL;
-		}
+		if (!cmd->prot_sdb)
+			goto fail_free_sense;
 	}
 
 	return cmd;
+
+fail_free_sense:
+	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+fail_free_cmd:
+	kmem_cache_free(pool->cmd_slab, cmd);
+fail:
+	return NULL;
 }
 
 /**
@@ -284,27 +267,19 @@ EXPORT_SYMBOL_GPL(__scsi_get_command);
  */
 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 {
-	struct scsi_cmnd *cmd;
+	struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+	unsigned long flags;
 
-	/* Bail if we can't get a reference to the device */
-	if (!get_device(&dev->sdev_gendev))
+	if (unlikely(cmd == NULL))
 		return NULL;
 
-	cmd = __scsi_get_command(dev->host, gfp_mask);
-
-	if (likely(cmd != NULL)) {
-		unsigned long flags;
-
-		cmd->device = dev;
-		INIT_LIST_HEAD(&cmd->list);
-		INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
-		spin_lock_irqsave(&dev->list_lock, flags);
-		list_add_tail(&cmd->list, &dev->cmd_list);
-		spin_unlock_irqrestore(&dev->list_lock, flags);
-		cmd->jiffies_at_alloc = jiffies;
-	} else
-		put_device(&dev->sdev_gendev);
-
+	cmd->device = dev;
+	INIT_LIST_HEAD(&cmd->list);
+	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+	spin_lock_irqsave(&dev->list_lock, flags);
+	list_add_tail(&cmd->list, &dev->cmd_list);
+	spin_unlock_irqrestore(&dev->list_lock, flags);
+	cmd->jiffies_at_alloc = jiffies;
 	return cmd;
 }
 EXPORT_SYMBOL(scsi_get_command);
@@ -313,25 +288,22 @@ EXPORT_SYMBOL(scsi_get_command);
  * __scsi_put_command - Free a struct scsi_cmnd
  * @shost: dev->host
  * @cmd: Command to free
- * @dev: parent scsi device
  */
-void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
-			struct device *dev)
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
 	unsigned long flags;
 
-	/* changing locks here, don't need to restore the irq state */
-	spin_lock_irqsave(&shost->free_list_lock, flags);
 	if (unlikely(list_empty(&shost->free_list))) {
-		list_add(&cmd->list, &shost->free_list);
-		cmd = NULL;
+		spin_lock_irqsave(&shost->free_list_lock, flags);
+		if (list_empty(&shost->free_list)) {
+			list_add(&cmd->list, &shost->free_list);
+			cmd = NULL;
+		}
+		spin_unlock_irqrestore(&shost->free_list_lock, flags);
 	}
-	spin_unlock_irqrestore(&shost->free_list_lock, flags);
 
 	if (likely(cmd != NULL))
-		scsi_pool_free_command(shost->cmd_pool, cmd);
-
-	put_device(dev);
+		scsi_host_free_command(shost, cmd);
 }
 EXPORT_SYMBOL(__scsi_put_command);
 
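Editor's note: the rewritten __scsi_put_command is an unlocked-peek-then-recheck pattern. The free-list lock is only taken when the cheap list_empty() peek suggests the free list needs refilling, and the test is repeated under the lock because the state may have changed in between. A small pthreads sketch of the idiom:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count;		/* stands in for the host free list */

static int put_command(void)
{
	int parked = 0;

	if (free_count == 0) {			/* cheap unlocked peek */
		pthread_mutex_lock(&lock);
		if (free_count == 0) {		/* re-check under the lock */
			free_count++;		/* park cmd on the free list */
			parked = 1;
		}
		pthread_mutex_unlock(&lock);
	}
	return parked;	/* 0 means the caller frees cmd back to the slab */
}

int main(void)
{
	printf("first put parked: %d\n", put_command());	/* 1 */
	printf("second put parked: %d\n", put_command());	/* 0 */
	return 0;
}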
@@ -345,7 +317,6 @@ EXPORT_SYMBOL(__scsi_put_command);
  */
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
 	unsigned long flags;
 
 	/* serious error if the command hasn't come from a device list */
@@ -356,50 +327,107 @@ void scsi_put_command(struct scsi_cmnd *cmd)
 
 	cancel_delayed_work(&cmd->abort_work);
 
-	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
+	__scsi_put_command(cmd->device->host, cmd);
 }
 EXPORT_SYMBOL(scsi_put_command);
 
-static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
+static struct scsi_host_cmd_pool *
+scsi_find_host_cmd_pool(struct Scsi_Host *shost)
+{
+	if (shost->hostt->cmd_size)
+		return shost->hostt->cmd_pool;
+	if (shost->unchecked_isa_dma)
+		return &scsi_cmd_dma_pool;
+	return &scsi_cmd_pool;
+}
+
+static void
+scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
 {
+	kfree(pool->sense_name);
+	kfree(pool->cmd_name);
+	kfree(pool);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+{
+	struct scsi_host_template *hostt = shost->hostt;
+	struct scsi_host_cmd_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+
+	pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
+	pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
+	if (!pool->cmd_name || !pool->sense_name) {
+		scsi_free_host_cmd_pool(pool);
+		return NULL;
+	}
+
+	pool->slab_flags = SLAB_HWCACHE_ALIGN;
+	if (shost->unchecked_isa_dma) {
+		pool->slab_flags |= SLAB_CACHE_DMA;
+		pool->gfp_mask = __GFP_DMA;
+	}
+	return pool;
+}
+
+static struct scsi_host_cmd_pool *
+scsi_get_host_cmd_pool(struct Scsi_Host *shost)
+{
+	struct scsi_host_template *hostt = shost->hostt;
 	struct scsi_host_cmd_pool *retval = NULL, *pool;
+	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
+
 	/*
 	 * Select a command slab for this host and create it if not
 	 * yet existent.
 	 */
 	mutex_lock(&host_cmd_pool_mutex);
-	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
-		&scsi_cmd_pool;
+	pool = scsi_find_host_cmd_pool(shost);
+	if (!pool) {
+		pool = scsi_alloc_host_cmd_pool(shost);
+		if (!pool)
+			goto out;
+	}
+
 	if (!pool->users) {
-		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
-						   sizeof(struct scsi_cmnd), 0,
+		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
 						   pool->slab_flags, NULL);
 		if (!pool->cmd_slab)
-			goto fail;
+			goto out_free_pool;
 
 		pool->sense_slab = kmem_cache_create(pool->sense_name,
 						     SCSI_SENSE_BUFFERSIZE, 0,
 						     pool->slab_flags, NULL);
-		if (!pool->sense_slab) {
-			kmem_cache_destroy(pool->cmd_slab);
-			goto fail;
-		}
+		if (!pool->sense_slab)
+			goto out_free_slab;
 	}
 
 	pool->users++;
 	retval = pool;
- fail:
+out:
 	mutex_unlock(&host_cmd_pool_mutex);
 	return retval;
+
+out_free_slab:
+	kmem_cache_destroy(pool->cmd_slab);
+out_free_pool:
+	if (hostt->cmd_size)
+		scsi_free_host_cmd_pool(pool);
+	goto out;
 }
 
-static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
 {
+	struct scsi_host_template *hostt = shost->hostt;
 	struct scsi_host_cmd_pool *pool;
 
 	mutex_lock(&host_cmd_pool_mutex);
-	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
-		&scsi_cmd_pool;
+	pool = scsi_find_host_cmd_pool(shost);
+
 	/*
 	 * This may happen if a driver has a mismatched get and put
 	 * of the command pool; the driver should be implicated in
@@ -410,67 +438,13 @@ static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
 	if (!--pool->users) {
 		kmem_cache_destroy(pool->cmd_slab);
 		kmem_cache_destroy(pool->sense_slab);
+		if (hostt->cmd_size)
+			scsi_free_host_cmd_pool(pool);
 	}
 	mutex_unlock(&host_cmd_pool_mutex);
 }
 
 /**
- * scsi_allocate_command - get a fully allocated SCSI command
- * @gfp_mask:	allocation mask
- *
- * This function is for use outside of the normal host based pools.
- * It allocates the relevant command and takes an additional reference
- * on the pool it used.  This function *must* be paired with
- * scsi_free_command which also has the identical mask, otherwise the
- * free pool counts will eventually go wrong and you'll trigger a bug.
- *
- * This function should *only* be used by drivers that need a static
- * command allocation at start of day for internal functions.
- */
-struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
-{
-	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
-	if (!pool)
-		return NULL;
-
-	return scsi_pool_alloc_command(pool, gfp_mask);
-}
-EXPORT_SYMBOL(scsi_allocate_command);
-
-/**
- * scsi_free_command - free a command allocated by scsi_allocate_command
- * @gfp_mask:	mask used in the original allocation
- * @cmd:	command to free
- *
- * Note: using the original allocation mask is vital because that's
- * what determines which command pool we use to free the command.  Any
- * mismatch will cause the system to BUG eventually.
- */
-void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
-{
-	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
-	/*
-	 * this could trigger if the mask to scsi_allocate_command
-	 * doesn't match this mask. Otherwise we're guaranteed that this
-	 * succeeds because scsi_allocate_command must have taken a reference
-	 * on the pool
-	 */
-	BUG_ON(!pool);
-
-	scsi_pool_free_command(pool, cmd);
-	/*
-	 * scsi_put_host_cmd_pool is called twice; once to release the
-	 * reference we took above, and once to release the reference
-	 * originally taken by scsi_allocate_command
-	 */
-	scsi_put_host_cmd_pool(gfp_mask);
-	scsi_put_host_cmd_pool(gfp_mask);
-}
-EXPORT_SYMBOL(scsi_free_command);
-
-/**
  * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
  * @shost: host to allocate the freelist for.
 *
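Editor's note: with these hooks in place, a driver opts in to a per-host pool simply by setting cmd_size in its host template; the midlayer then builds "<name>_cmd" and "<name>_sense" slabs, sizing the command slab at sizeof(struct scsi_cmnd) + cmd_size, and since commands come from kmem_cache_zalloc the driver-private tail arrives zeroed. An illustrative opt-in sketch (hypothetical driver and struct names, not from any real LLD):

/* Hypothetical LLD opting in to a per-host command pool. The private
 * struct lives in the slab object directly after struct scsi_cmnd. */
struct my_cmd_priv {
	u32	tag;
	void	*fw_handle;
};

static struct scsi_host_template my_template = {
	.name		= "mydrv",
	.cmd_size	= sizeof(struct my_cmd_priv),
	/* remaining hooks (queuecommand etc.) omitted from this sketch */
};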
@@ -482,14 +456,13 @@ EXPORT_SYMBOL(scsi_free_command);
  */
 int scsi_setup_command_freelist(struct Scsi_Host *shost)
 {
-	struct scsi_cmnd *cmd;
 	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+	struct scsi_cmnd *cmd;
 
 	spin_lock_init(&shost->free_list_lock);
 	INIT_LIST_HEAD(&shost->free_list);
 
-	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
-
+	shost->cmd_pool = scsi_get_host_cmd_pool(shost);
 	if (!shost->cmd_pool)
 		return -ENOMEM;
 
@@ -498,7 +471,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 	 */
 	cmd = scsi_host_alloc_command(shost, gfp_mask);
 	if (!cmd) {
-		scsi_put_host_cmd_pool(gfp_mask);
+		scsi_put_host_cmd_pool(shost);
 		shost->cmd_pool = NULL;
 		return -ENOMEM;
 	}
@@ -524,10 +497,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 
 		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
 		list_del_init(&cmd->list);
-		scsi_pool_free_command(shost->cmd_pool, cmd);
+		scsi_host_free_command(shost, cmd);
 	}
 	shost->cmd_pool = NULL;
-	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
+	scsi_put_host_cmd_pool(shost);
 }
 
 #ifdef CONFIG_SCSI_LOGGING
@@ -954,7 +927,7 @@ EXPORT_SYMBOL(scsi_track_queue_full);
  * This is an internal helper function.  You probably want to use
  * scsi_get_vpd_page instead.
  *
- * Returns 0 on success or a negative error number.
+ * Returns size of the vpd page on success or a negative error number.
  */
 static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 							u8 page, unsigned len)
@@ -962,6 +935,9 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 	int result;
 	unsigned char cmd[16];
 
+	if (len < 4)
+		return -EINVAL;
+
 	cmd[0] = INQUIRY;
 	cmd[1] = 1;		/* EVPD */
 	cmd[2] = page;
@@ -976,13 +952,13 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
 				  len, NULL, 30 * HZ, 3, NULL);
 	if (result)
-		return result;
+		return -EIO;
 
 	/* Sanity check that we got the page back that we asked for */
 	if (buffer[1] != page)
 		return -EIO;
 
-	return 0;
+	return get_unaligned_be16(&buffer[2]) + 4;
 }
 
 /**
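Editor's note: the new return value is just the VPD page header arithmetic: bytes 2 and 3 hold the big-endian payload length, and adding the 4 header bytes gives the full page size, which is what callers now use for buffer sizing. A runnable check of the arithmetic:

#include <stdio.h>

/* VPD page header: byte 0 = qualifier/device type, byte 1 = page code,
 * bytes 2..3 = big-endian payload length (excludes the header). */
static int vpd_page_len(const unsigned char *buf)
{
	return ((buf[2] << 8) | buf[3]) + 4;	/* header + payload */
}

int main(void)
{
	unsigned char page83[] = { 0x00, 0x83, 0x00, 0x3c };

	printf("full page length: %d\n", vpd_page_len(page83));	/* 64 */
	return 0;
}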
@@ -1009,18 +985,18 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 
 	/* Ask for all the pages supported by this device */
 	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
-	if (result)
+	if (result < 4)
 		goto fail;
 
 	/* If the user actually wanted this page, we can skip the rest */
 	if (page == 0)
 		return 0;
 
-	for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
-		if (buf[i + 4] == page)
+	for (i = 4; i < min(result, buf_len); i++)
+		if (buf[i] == page)
 			goto found;
 
-	if (i < buf[3] && i >= buf_len - 4)
+	if (i < result && i >= buf_len)
 		/* ran off the end of the buffer, give us benefit of doubt */
 		goto found;
 	/* The device claims it doesn't support the requested page */
@@ -1028,7 +1004,7 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 
  found:
 	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
-	if (result)
+	if (result < 0)
 		goto fail;
 
 	return 0;
@@ -1039,6 +1015,93 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
 /**
+ * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
+ * @sdev: The device to ask
+ *
+ * Attach the 'Device Identification' VPD page (0x83) and the
+ * 'Unit Serial Number' VPD page (0x80) to a SCSI device
+ * structure. This information can be used to identify the device
+ * uniquely.
+ */
+void scsi_attach_vpd(struct scsi_device *sdev)
+{
+	int result, i;
+	int vpd_len = SCSI_VPD_PG_LEN;
+	int pg80_supported = 0;
+	int pg83_supported = 0;
+	unsigned char *vpd_buf;
+
+	if (sdev->skip_vpd_pages)
+		return;
+retry_pg0:
+	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+	if (!vpd_buf)
+		return;
+
+	/* Ask for all the pages supported by this device */
+	result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
+	if (result < 0) {
+		kfree(vpd_buf);
+		return;
+	}
+	if (result > vpd_len) {
+		vpd_len = result;
+		kfree(vpd_buf);
+		goto retry_pg0;
+	}
+
+	for (i = 4; i < result; i++) {
+		if (vpd_buf[i] == 0x80)
+			pg80_supported = 1;
+		if (vpd_buf[i] == 0x83)
+			pg83_supported = 1;
+	}
+	kfree(vpd_buf);
+	vpd_len = SCSI_VPD_PG_LEN;
+
+	if (pg80_supported) {
+retry_pg80:
+		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+		if (!vpd_buf)
+			return;
+
+		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
+		if (result < 0) {
+			kfree(vpd_buf);
+			return;
+		}
+		if (result > vpd_len) {
+			vpd_len = result;
+			kfree(vpd_buf);
+			goto retry_pg80;
+		}
+		sdev->vpd_pg80_len = result;
+		sdev->vpd_pg80 = vpd_buf;
+		vpd_len = SCSI_VPD_PG_LEN;
+	}
+
+	if (pg83_supported) {
+retry_pg83:
+		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+		if (!vpd_buf)
+			return;
+
+		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
+		if (result < 0) {
+			kfree(vpd_buf);
+			return;
+		}
+		if (result > vpd_len) {
+			vpd_len = result;
+			kfree(vpd_buf);
+			goto retry_pg83;
+		}
+		sdev->vpd_pg83_len = result;
+		sdev->vpd_pg83 = vpd_buf;
+	}
+}
+
+/**
  * scsi_report_opcode - Find out if a given command opcode is supported
  * @sdev: scsi device to query
 * @buffer: scratch buffer (must be at least 20 bytes long)
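Editor's note: scsi_attach_vpd discovers the two pages by walking the supported-pages list (VPD page 0), whose payload after the 4-byte header is simply a list of page codes; the grow-and-retry kmalloc loop reuses the length that scsi_vpd_inquiry now returns. A standalone C sketch of the page-0 walk (the example buffer is made up):

#include <stdio.h>

int main(void)
{
	/* Supported-pages VPD (page 0): bytes 4..result-1 list the
	 * supported page codes; the attach logic looks for 0x80/0x83. */
	unsigned char pg0[] = { 0x00, 0x00, 0x00, 0x03, 0x00, 0x80, 0x83 };
	int result = ((pg0[2] << 8) | pg0[3]) + 4;

	for (int i = 4; i < result; i++)
		if (pg0[i] == 0x80 || pg0[i] == 0x83)
			printf("page 0x%02x supported\n", pg0[i]);
	return 0;
}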
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2decc6417518..f3e9cc038d1d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -64,6 +64,7 @@ static const char * scsi_debug_version_date = "20100324";
 /* Additional Sense Code (ASC) */
 #define NO_ADDITIONAL_SENSE 0x0
 #define LOGICAL_UNIT_NOT_READY 0x4
+#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
 #define UNRECOVERED_READ_ERR 0x11
 #define PARAMETER_LIST_LENGTH_ERR 0x1a
 #define INVALID_OPCODE 0x20
@@ -195,6 +196,7 @@ static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
 static bool scsi_debug_removable = DEF_REMOVABLE;
+static bool scsi_debug_clustering;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -1780,7 +1782,6 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
 		pr_err("%s: REF check failed on sector %lu\n",
 		       __func__, (unsigned long)sector);
-		dif_errors++;
 		return 0x03;
 	}
 	return 0;
@@ -1789,23 +1790,27 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
 			  unsigned int sectors, bool read)
 {
-	unsigned int i, resid;
-	struct scatterlist *psgl;
+	size_t resid;
 	void *paddr;
 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
+	struct sg_mapping_iter miter;
 
 	/* Bytes of protection data to copy into sgl */
 	resid = sectors * sizeof(*dif_storep);
 
-	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
-		int len = min(psgl->length, resid);
+	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
+		       scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
+		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+
+	while (sg_miter_next(&miter) && resid > 0) {
+		size_t len = min(miter.length, resid);
 		void *start = dif_store(sector);
-		int rest = 0;
+		size_t rest = 0;
 
 		if (dif_store_end < start + len)
 			rest = start + len - dif_store_end;
 
-		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+		paddr = miter.addr;
 
 		if (read)
 			memcpy(paddr, start, len - rest);
@@ -1821,8 +1826,8 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
 
 		sector += len / sizeof(*dif_storep);
 		resid -= len;
-		kunmap_atomic(paddr);
 	}
+	sg_miter_stop(&miter);
 }
 
 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
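Editor's note: the sg_mapping_iter conversion above replaces open-coded kmap_atomic()/sg_next() walks: sg_miter_next() maps one scatterlist chunk at a time into miter.addr, and sg_miter_stop() drops the last mapping. A minimal kernel-context sketch of the consumer side (not standalone; copy_from_sg is a made-up helper):

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical helper showing the sg_mapping_iter pattern used above. */
static void copy_from_sg(struct scatterlist *sgl, int nents,
			 void *dst, size_t resid)
{
	struct sg_mapping_iter miter;
	char *p = dst;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);

		memcpy(p, miter.addr, len);	/* miter.addr is mapped here */
		p += len;
		resid -= len;
	}
	sg_miter_stop(&miter);	/* unmaps the final chunk */
}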
1832 struct sd_dif_tuple *sdt; 1837 struct sd_dif_tuple *sdt;
1833 sector_t sector; 1838 sector_t sector;
1834 1839
1835 for (i = 0; i < sectors; i++) { 1840 for (i = 0; i < sectors; i++, ei_lba++) {
1836 int ret; 1841 int ret;
1837 1842
1838 sector = start_sec + i; 1843 sector = start_sec + i;
@@ -1846,8 +1851,6 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1846 dif_errors++; 1851 dif_errors++;
1847 return ret; 1852 return ret;
1848 } 1853 }
1849
1850 ei_lba++;
1851 } 1854 }
1852 1855
1853 dif_copy_prot(SCpnt, start_sec, sectors, true); 1856 dif_copy_prot(SCpnt, start_sec, sectors, true);
@@ -1886,17 +1889,19 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
 		return check_condition_result;
 	}
 
+	read_lock_irqsave(&atomic_rw, iflags);
+
 	/* DIX + T10 DIF */
 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
 
 		if (prot_ret) {
+			read_unlock_irqrestore(&atomic_rw, iflags);
 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
 			return illegal_condition_result;
 		}
 	}
 
-	read_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 0);
 	read_unlock_irqrestore(&atomic_rw, iflags);
 	if (ret == -1)
@@ -1931,55 +1936,62 @@ void dump_sector(unsigned char *buf, int len)
1931static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, 1936static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1932 unsigned int sectors, u32 ei_lba) 1937 unsigned int sectors, u32 ei_lba)
1933{ 1938{
1934 int i, j, ret; 1939 int ret;
1935 struct sd_dif_tuple *sdt; 1940 struct sd_dif_tuple *sdt;
1936 struct scatterlist *dsgl; 1941 void *daddr;
1937 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1938 void *daddr, *paddr;
1939 sector_t sector = start_sec; 1942 sector_t sector = start_sec;
1940 int ppage_offset; 1943 int ppage_offset;
1944 int dpage_offset;
1945 struct sg_mapping_iter diter;
1946 struct sg_mapping_iter piter;
1941 1947
1942 BUG_ON(scsi_sg_count(SCpnt) == 0); 1948 BUG_ON(scsi_sg_count(SCpnt) == 0);
1943 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); 1949 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1944 1950
1945 ppage_offset = 0; 1951 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
1946 1952 scsi_prot_sg_count(SCpnt),
1947 /* For each data page */ 1953 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1948 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { 1954 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1949 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset; 1955 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1950 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; 1956
1951 1957 /* For each protection page */
1952 /* For each sector-sized chunk in data page */ 1958 while (sg_miter_next(&piter)) {
1953 for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) { 1959 dpage_offset = 0;
1960 if (WARN_ON(!sg_miter_next(&diter))) {
1961 ret = 0x01;
1962 goto out;
1963 }
1954 1964
1965 for (ppage_offset = 0; ppage_offset < piter.length;
1966 ppage_offset += sizeof(struct sd_dif_tuple)) {
1955 /* If we're at the end of the current 1967 /* If we're at the end of the current
1956 * protection page advance to the next one 1968 * data page advance to the next one
1957 */ 1969 */
1958 if (ppage_offset >= psgl->length) { 1970 if (dpage_offset >= diter.length) {
1959 kunmap_atomic(paddr); 1971 if (WARN_ON(!sg_miter_next(&diter))) {
1960 psgl = sg_next(psgl); 1972 ret = 0x01;
1961 BUG_ON(psgl == NULL); 1973 goto out;
1962 paddr = kmap_atomic(sg_page(psgl)) 1974 }
1963 + psgl->offset; 1975 dpage_offset = 0;
1964 ppage_offset = 0;
1965 } 1976 }
1966 1977
1967 sdt = paddr + ppage_offset; 1978 sdt = piter.addr + ppage_offset;
1979 daddr = diter.addr + dpage_offset;
1968 1980
1969 ret = dif_verify(sdt, daddr + j, sector, ei_lba); 1981 ret = dif_verify(sdt, daddr, sector, ei_lba);
1970 if (ret) { 1982 if (ret) {
1971 dump_sector(daddr + j, scsi_debug_sector_size); 1983 dump_sector(daddr, scsi_debug_sector_size);
1972 goto out; 1984 goto out;
1973 } 1985 }
1974 1986
1975 sector++; 1987 sector++;
1976 ei_lba++; 1988 ei_lba++;
1977 ppage_offset += sizeof(struct sd_dif_tuple); 1989 dpage_offset += scsi_debug_sector_size;
1978 } 1990 }
1979 1991 diter.consumed = dpage_offset;
1980 kunmap_atomic(paddr); 1992 sg_miter_stop(&diter);
1981 kunmap_atomic(daddr);
1982 } 1993 }
1994 sg_miter_stop(&piter);
1983 1995
1984 dif_copy_prot(SCpnt, start_sec, sectors, false); 1996 dif_copy_prot(SCpnt, start_sec, sectors, false);
1985 dix_writes++; 1997 dix_writes++;
@@ -1988,8 +2000,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1988 2000
1989out: 2001out:
1990 dif_errors++; 2002 dif_errors++;
1991 kunmap_atomic(paddr); 2003 sg_miter_stop(&diter);
1992 kunmap_atomic(daddr); 2004 sg_miter_stop(&piter);
1993 return ret; 2005 return ret;
1994} 2006}
1995 2007
@@ -2089,17 +2101,19 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2089 if (ret) 2101 if (ret)
2090 return ret; 2102 return ret;
2091 2103
2104 write_lock_irqsave(&atomic_rw, iflags);
2105
2092 /* DIX + T10 DIF */ 2106 /* DIX + T10 DIF */
2093 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { 2107 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2094 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); 2108 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2095 2109
2096 if (prot_ret) { 2110 if (prot_ret) {
2111 write_unlock_irqrestore(&atomic_rw, iflags);
2097 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret); 2112 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2098 return illegal_condition_result; 2113 return illegal_condition_result;
2099 } 2114 }
2100 } 2115 }
2101 2116
2102 write_lock_irqsave(&atomic_rw, iflags);
2103 ret = do_device_access(SCpnt, devip, lba, num, 1); 2117 ret = do_device_access(SCpnt, devip, lba, num, 1);
2104 if (scsi_debug_lbp()) 2118 if (scsi_debug_lbp())
2105 map_region(lba, num); 2119 map_region(lba, num);
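
[Editor's note: the resp_read()/resp_write() hunks move the atomic_rw
lock acquisition above the DIF/DIX verification step, closing a window
in which a writer could modify the backing store between verification
and the actual data access. A self-contained sketch of the resulting
shape, with stubs standing in for the scsi_debug helpers:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(resource_lock);        /* stands in for atomic_rw */

    static int validate(void)    { return 0; }  /* cf. prot_verify_read() */
    static int access_data(void) { return 0; }  /* cf. do_device_access() */

    /* Verify and access under one read-side critical section so a
     * concurrent writer cannot slip in between the two steps.  Note
     * that every early return must drop the lock first. */
    static int checked_read(void)
    {
            unsigned long iflags;
            int ret;

            read_lock_irqsave(&resource_lock, iflags);
            ret = validate();
            if (ret) {
                    read_unlock_irqrestore(&resource_lock, iflags);
                    return ret;
            }
            ret = access_data();
            read_unlock_irqrestore(&resource_lock, iflags);
            return ret;
    }
]
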
@@ -2178,6 +2192,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
 	struct unmap_block_desc *desc;
 	unsigned int i, payload_len, descriptors;
 	int ret;
+	unsigned long iflags;
 
 	ret = check_readiness(scmd, 1, devip);
 	if (ret)
@@ -2199,6 +2214,8 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
 
 	desc = (void *)&buf[8];
 
+	write_lock_irqsave(&atomic_rw, iflags);
+
 	for (i = 0 ; i < descriptors ; i++) {
 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
@@ -2213,6 +2230,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
 	ret = 0;
 
 out:
+	write_unlock_irqrestore(&atomic_rw, iflags);
 	kfree(buf);
 
 	return ret;
@@ -2313,36 +2331,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,
 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
 			    unsigned int num, struct sdebug_dev_info *devip)
 {
-	int i, j, ret = -1;
+	int j;
 	unsigned char *kaddr, *buf;
 	unsigned int offset;
-	struct scatterlist *sg;
 	struct scsi_data_buffer *sdb = scsi_in(scp);
+	struct sg_mapping_iter miter;
 
 	/* better not to use temporary buffer. */
 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
-	if (!buf)
-		return ret;
+	if (!buf) {
+		mk_sense_buffer(devip, NOT_READY,
+				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+		return check_condition_result;
+	}
 
 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
 
 	offset = 0;
-	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
-		if (!kaddr)
-			goto out;
+	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
+			SG_MITER_ATOMIC | SG_MITER_TO_SG);
 
-		for (j = 0; j < sg->length; j++)
-			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
+	while (sg_miter_next(&miter)) {
+		kaddr = miter.addr;
+		for (j = 0; j < miter.length; j++)
+			*(kaddr + j) ^= *(buf + offset + j);
 
-		offset += sg->length;
-		kunmap_atomic(kaddr);
+		offset += miter.length;
 	}
-	ret = 0;
-out:
+	sg_miter_stop(&miter);
 	kfree(buf);
 
-	return ret;
+	return 0;
 }
 
 /* When timer goes off this function is called. */
@@ -2744,6 +2763,7 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
  */
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
@@ -2787,6 +2807,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
 
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -3248,7 +3269,7 @@ static struct attribute *sdebug_drv_attrs[] = {
};
ATTRIBUTE_GROUPS(sdebug_drv);
 
-struct device *pseudo_primary;
+static struct device *pseudo_primary;
 
static int __init scsi_debug_init(void)
{
@@ -3934,6 +3955,8 @@ static int sdebug_driver_probe(struct device * dev)
 	sdbg_host = to_sdebug_host(dev);
 
 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
+	if (scsi_debug_clustering)
+		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
 		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
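
[Editor's note: the scsi_debug hunks above add a writable bool module
parameter, "clustering", and feed it into the host template before
scsi_host_alloc(). The module-parameter idiom itself, as a hypothetical
stand-alone module (names are illustrative):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool demo_clustering;    /* bool params accept 0/1 or n/y */
    module_param_named(clustering, demo_clustering, bool,
                       S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(clustering,
                     "when set enables larger transfers (def=0)");

    static int __init demo_init(void)
    {
            pr_info("demo: clustering=%d\n", demo_clustering);
            return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void) { }
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");

With the real driver, something like "modprobe scsi_debug clustering=1"
would presumably set ENABLE_CLUSTERING, letting the block layer merge
adjacent pages into larger transfers.]
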
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 78b004da2885..771c16bfdbac 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2288,6 +2288,11 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	if (scsi_autopm_get_host(shost) < 0)
 		return FAILED;
 
+	if (!get_device(&dev->sdev_gendev)) {
+		rtn = FAILED;
+		goto out_put_autopm_host;
+	}
+
 	scmd = scsi_get_command(dev, GFP_KERNEL);
 	blk_rq_init(NULL, &req);
 	scmd->request = &req;
@@ -2345,6 +2350,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	scsi_run_host_queues(shost);
 
 	scsi_next_command(scmd);
+out_put_autopm_host:
 	scsi_autopm_put_host(shost);
 	return rtn;
 }
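
[Editor's note: the scsi_error.c hunk pins the scsi_device with
get_device() for the duration of the reset, unwinding through a goto
label on failure. The acquire-then-unwind shape in isolation
(illustrative names, not the SCSI API):

    #include <linux/device.h>
    #include <linux/errno.h>

    static int guarded_reset(struct device *dev)
    {
            int rtn = 0;

            if (!get_device(dev)) {     /* device already being torn down */
                    rtn = -ENODEV;
                    goto out;
            }

            /* ... issue the reset while the reference is held ... */

            put_device(dev);            /* pairs with get_device() above */
    out:
            return rtn;
    }

Each later resource in such a function gets its own label, placed so
failure paths release exactly what was already taken, in reverse order.]
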
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ec84b42e31..5681c05ac506 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,28 +75,6 @@ struct kmem_cache *scsi_sdb_cache;
  */
 #define SCSI_QUEUE_DELAY	3
 
-/*
- * Function:	scsi_unprep_request()
- *
- * Purpose:	Remove all preparation done for a request, including its
- *		associated scsi_cmnd, so that it can be requeued.
- *
- * Arguments:	req	- request to unprepare
- *
- * Lock status:	Assumed that no locks are held upon entry.
- *
- * Returns:	Nothing.
- */
-static void scsi_unprep_request(struct request *req)
-{
-	struct scsi_cmnd *cmd = req->special;
-
-	blk_unprep_request(req);
-	req->special = NULL;
-
-	scsi_put_command(cmd);
-}
-
 /**
  * __scsi_queue_insert - private queue insertion
  * @cmd: The SCSI command being requeued
@@ -385,29 +363,12 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
 	return 0;
 }
 
-/*
- * Function:	scsi_run_queue()
- *
- * Purpose:	Select a proper request queue to serve next
- *
- * Arguments:	q	- last request's queue
- *
- * Returns:	Nothing
- *
- * Notes:	The previous command was completely finished, start
- *		a new one if possible.
- */
-static void scsi_run_queue(struct request_queue *q)
+static void scsi_starved_list_run(struct Scsi_Host *shost)
 {
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost;
 	LIST_HEAD(starved_list);
+	struct scsi_device *sdev;
 	unsigned long flags;
 
-	shost = sdev->host;
-	if (scsi_target(sdev)->single_lun)
-		scsi_single_lun_run(sdev);
-
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->starved_list, &starved_list);
 
@@ -459,6 +420,28 @@ static void scsi_run_queue(struct request_queue *q)
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
 	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function:	scsi_run_queue()
+ *
+ * Purpose:	Select a proper request queue to serve next
+ *
+ * Arguments:	q	- last request's queue
+ *
+ * Returns:	Nothing
+ *
+ * Notes:	The previous command was completely finished, start
+ *		a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+
+	if (scsi_target(sdev)->single_lun)
+		scsi_single_lun_run(sdev);
+	if (!list_empty(&sdev->host->starved_list))
+		scsi_starved_list_run(sdev->host);
 
 	blk_run_queue(q);
 }
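
[Editor's note: scsi_run_queue() is split so the expensive starved-list
walk lives in its own helper and is skipped entirely when the list is
empty. The helper is built on the splice-to-private-list idiom; a
simplified sketch (the real code re-takes the host lock around each
entry and splices leftovers back):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct work_item {
            struct list_head entry;
    };

    static LIST_HEAD(pending);
    static DEFINE_SPINLOCK(pending_lock);

    static void process(struct work_item *item) { /* ... */ }

    static void drain_pending(void)
    {
            LIST_HEAD(local);
            struct work_item *item, *tmp;
            unsigned long flags;

            /* move everything to a private head under the lock ... */
            spin_lock_irqsave(&pending_lock, flags);
            list_splice_init(&pending, &local);
            spin_unlock_irqrestore(&pending_lock, flags);

            /* ... then walk it without holding the list lock */
            list_for_each_entry_safe(item, tmp, &local, entry) {
                    list_del(&item->entry);
                    process(item);
            }
    }
]
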
@@ -497,16 +480,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 	struct request *req = cmd->request;
 	unsigned long flags;
 
-	/*
-	 * We need to hold a reference on the device to avoid the queue being
-	 * killed after the unlock and before scsi_run_queue is invoked which
-	 * may happen because scsi_unprep_request() puts the command which
-	 * releases its reference on the device.
-	 */
-	get_device(&sdev->sdev_gendev);
-
 	spin_lock_irqsave(q->queue_lock, flags);
-	scsi_unprep_request(req);
+	blk_unprep_request(req);
+	req->special = NULL;
+	scsi_put_command(cmd);
 	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -520,13 +497,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)
 	struct scsi_device *sdev = cmd->device;
 	struct request_queue *q = sdev->request_queue;
 
-	/* need to hold a reference on the device before we let go of the cmd */
-	get_device(&sdev->sdev_gendev);
-
 	scsi_put_command(cmd);
 	scsi_run_queue(q);
 
-	/* ok to remove device now */
 	put_device(&sdev->sdev_gendev);
 }
 
@@ -788,6 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
 	      ACTION_DELAYED_RETRY} action;
 	char *description = NULL;
+	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
 
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -989,6 +963,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		action = ACTION_FAIL;
 	}
 
+	if (action != ACTION_FAIL &&
+	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+		action = ACTION_FAIL;
+		description = "Command timed out";
+	}
+
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
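
[Editor's note: the scsi_io_completion() hunk adds a hard cap on total
retry time: (retries allowed + 1) attempts times the per-attempt
timeout, measured from command allocation. The jiffies arithmetic in
isolation (the struct below is a stand-in for the scsi_cmnd/request
fields named in the hunk):

    #include <linux/jiffies.h>

    struct cmd_like {
            unsigned long jiffies_at_alloc; /* when the cmd was created */
            int allowed;                    /* retries allowed */
            unsigned int timeout;           /* per-attempt, in jiffies */
    };

    static bool past_total_timeout(const struct cmd_like *cmd)
    {
            unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout;

            /* true once jiffies_at_alloc + wait_for lies in the past;
             * time_before() is overflow-safe for jiffies comparisons */
            return time_before(cmd->jiffies_at_alloc + wait_for, jiffies);
    }

When this trips, the command is failed outright ("Command timed out")
instead of being requeued for yet another round of retries.]
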
@@ -1111,6 +1091,7 @@ err_exit:
 	scsi_release_buffers(cmd);
 	cmd->request->special = NULL;
 	scsi_put_command(cmd);
+	put_device(&cmd->device->sdev_gendev);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1121,9 +1102,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 	struct scsi_cmnd *cmd;
 
 	if (!req->special) {
+		/* Bail if we can't get a reference to the device */
+		if (!get_device(&sdev->sdev_gendev))
+			return NULL;
+
 		cmd = scsi_get_command(sdev, GFP_ATOMIC);
-		if (unlikely(!cmd))
+		if (unlikely(!cmd)) {
+			put_device(&sdev->sdev_gendev);
 			return NULL;
+		}
 		req->special = cmd;
 	} else {
 		cmd = req->special;
@@ -1286,6 +1273,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 		struct scsi_cmnd *cmd = req->special;
 		scsi_release_buffers(cmd);
 		scsi_put_command(cmd);
+		put_device(&cmd->device->sdev_gendev);
 		req->special = NULL;
 	}
 	break;
@@ -1543,16 +1531,14 @@ static void scsi_softirq_done(struct request *rq)
  * Lock status: IO request lock assumed to be held when called.
  */
 static void scsi_request_fn(struct request_queue *q)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 	struct scsi_cmnd *cmd;
 	struct request *req;
 
-	if(!get_device(&sdev->sdev_gendev))
-		/* We must be tearing the block queue down already */
-		return;
-
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.
@@ -1641,7 +1627,7 @@ static void scsi_request_fn(struct request_queue *q)
 			goto out_delay;
 		}
 
-	goto out;
+	return;
 
  not_ready:
 	spin_unlock_irq(shost->host_lock);
@@ -1660,12 +1646,6 @@ static void scsi_request_fn(struct request_queue *q)
 out_delay:
 	if (sdev->device_busy == 0)
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
-out:
-	/* must be careful here...if we trigger the ->remove() function
-	 * we cannot be holding the q lock */
-	spin_unlock_irq(q->queue_lock);
-	put_device(&sdev->sdev_gendev);
-	spin_lock_irq(q->queue_lock);
 }
 
 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 307a81137607..27f96d5b7680 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
 	unsigned long flags;
 
+	starget->state = STARGET_DEL;
 	transport_destroy_device(dev);
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (shost->hostt->target_destroy)
@@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
 }
 
 /**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs.  Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+	struct scsi_target *starget
+		= container_of(kref, struct scsi_target, reap_ref);
+
+	/*
+	 * if we get here and the target is still in the CREATED state that
+	 * means it was allocated but never made visible (because a scan
+	 * turned up no LUNs), so don't call device_del() on it.
+	 */
+	if (starget->state != STARGET_CREATED) {
+		transport_remove_device(&starget->dev);
+		device_del(&starget->dev);
+	}
+	scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
  * scsi_alloc_target - allocate a new or find an existing target
  * @parent:	parent of the target (need not be a scsi host)
  * @channel:	target channel number (zero if no channels)
@@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 		+ shost->transportt->target_size;
 	struct scsi_target *starget;
 	struct scsi_target *found_target;
-	int error;
+	int error, ref_got;
 
 	starget = kzalloc(size, GFP_KERNEL);
 	if (!starget) {
@@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	}
 	dev = &starget->dev;
 	device_initialize(dev);
-	starget->reap_ref = 1;
+	kref_init(&starget->reap_ref);
 	dev->parent = get_device(parent);
 	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
 	dev->bus = &scsi_bus_type;
@@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 	return starget;
 
  found:
-	found_target->reap_ref++;
+	/*
+	 * release routine already fired if kref is zero, so if we can still
+	 * take the reference, the target must be alive.  If we can't, it must
+	 * be dying and we need to wait for a new target
+	 */
+	ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
 	spin_unlock_irqrestore(shost->host_lock, flags);
-	if (found_target->state != STARGET_DEL) {
+	if (ref_got) {
 		put_device(dev);
 		return found_target;
 	}
-	/* Unfortunately, we found a dying target; need to
-	 * wait until it's dead before we can get a new one */
+	/*
+	 * Unfortunately, we found a dying target; need to wait until it's
+	 * dead before we can get a new one.  There is an anomaly here.  We
+	 * *should* call scsi_target_reap() to balance the kref_get() of the
+	 * reap_ref above.  However, since the target being released, it's
+	 * already invisible and the reap_ref is irrelevant.  If we call
+	 * scsi_target_reap() we might spuriously do another device_del() on
+	 * an already invisible target.
+	 */
 	put_device(&found_target->dev);
-	flush_scheduled_work();
+	/*
+	 * length of time is irrelevant here, we just want to yield the CPU
+	 * for a tick to avoid busy waiting for the target to die.
+	 */
+	msleep(1);
 	goto retry;
 }
 
-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
-	struct scsi_target *starget =
-		container_of(work, struct scsi_target, ew.work);
-
-	transport_remove_device(&starget->dev);
-	device_del(&starget->dev);
-	scsi_target_destroy(starget);
-}
-
 /**
  * scsi_target_reap - check to see if target is in use and destroy if not
  * @starget: target to be checked
@@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
  */
 void scsi_target_reap(struct scsi_target *starget)
 {
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	unsigned long flags;
-	enum scsi_target_state state;
-	int empty = 0;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	state = starget->state;
-	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
-		empty = 1;
-		starget->state = STARGET_DEL;
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	if (!empty)
-		return;
-
-	BUG_ON(state == STARGET_DEL);
-	if (state == STARGET_CREATED)
-		scsi_target_destroy(starget);
-	else
-		execute_in_process_context(scsi_target_reap_usercontext,
-					   &starget->ew);
+	/*
+	 * serious problem if this triggers: STARGET_DEL is only set in the if
+	 * the reap_ref drops to zero, so we're trying to do another final put
+	 * on an already released kref
+	 */
+	BUG_ON(starget->state == STARGET_DEL);
+	scsi_target_reap_ref_put(starget);
 }
 
 /**
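
[Editor's note: the scsi_scan.c hunks convert the hand-rolled reap_ref
counter into a struct kref. The key idiom is kref_get_unless_zero(): a
looked-up object whose refcount already hit zero has fired (or is about
to fire) its release routine, so only a conditional get may revive it.
A generic sketch (types and names are illustrative):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct tgt {
            struct kref ref;
            /* ... payload ... */
    };

    static void tgt_release(struct kref *kref)
    {
            kfree(container_of(kref, struct tgt, ref));
    }

    /* Returns the object with a reference held, or NULL if it is
     * already dying and the caller must wait and retry. */
    static struct tgt *tgt_tryget(struct tgt *cached)
    {
            if (cached && kref_get_unless_zero(&cached->ref))
                    return cached;
            return NULL;
    }

    static void tgt_put(struct tgt *t)
    {
            kref_put(&t->ref, tgt_release);
    }
]
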
@@ -946,6 +970,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 		}
 	}
 
+	if (sdev->scsi_level >= SCSI_3)
+		scsi_attach_vpd(sdev);
+
 	sdev->max_queue_depth = sdev->queue_depth;
 
 	/*
@@ -1532,6 +1559,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
 	}
 	mutex_unlock(&shost->scan_mutex);
 	scsi_autopm_put_target(starget);
+	/*
+	 * paired with scsi_alloc_target().  Target will be destroyed unless
+	 * scsi_probe_and_add_lun made an underlying device visible
+	 */
 	scsi_target_reap(starget);
 	put_device(&starget->dev);
 
@@ -1612,8 +1643,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
 
 out_reap:
 	scsi_autopm_put_target(starget);
-	/* now determine if the target has any children at all
-	 * and if not, nuke it */
+	/*
+	 * paired with scsi_alloc_target(): determine if the target has
+	 * any children at all and if not, nuke it
+	 */
 	scsi_target_reap(starget);
 
 	put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8ead24c3453a..074e8cc30955 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -300,7 +300,9 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
 	int ret = -EINVAL;
 	unsigned long deadline, flags;
 
-	if (shost->transportt && shost->transportt->eh_strategy_handler)
+	if (shost->transportt &&
+	    (shost->transportt->eh_strategy_handler ||
+	     !shost->hostt->eh_host_reset_handler))
 		return ret;
 
 	if (!strncmp(buf, "off", strlen("off")))
@@ -383,17 +385,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
 	struct scsi_device *sdev;
 	struct device *parent;
-	struct scsi_target *starget;
 	struct list_head *this, *tmp;
 	unsigned long flags;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
 
 	parent = sdev->sdev_gendev.parent;
-	starget = to_scsi_target(parent);
 
 	spin_lock_irqsave(sdev->host->host_lock, flags);
-	starget->reap_ref++;
 	list_del(&sdev->siblings);
 	list_del(&sdev->same_target_siblings);
 	list_del(&sdev->starved_entry);
@@ -413,8 +412,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
 
-	scsi_target_reap(scsi_target(sdev));
-
+	kfree(sdev->vpd_pg83);
+	kfree(sdev->vpd_pg80);
 	kfree(sdev->inquiry);
 	kfree(sdev);
 
@@ -579,7 +578,6 @@ static int scsi_sdev_check_buf_bit(const char *buf)
  * Create the actual show/store functions and data structures.
  */
 sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (queue_depth, "%d\n");
 sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
@@ -712,10 +710,64 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, 20, "%s\n", name);
 }
 
-static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
+static ssize_t
+store_queue_type_field(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_host_template *sht = sdev->host->hostt;
+	int tag_type = 0, retval;
+	int prev_tag_type = scsi_get_tag_type(sdev);
+
+	if (!sdev->tagged_supported || !sht->change_queue_type)
+		return -EINVAL;
+
+	if (strncmp(buf, "ordered", 7) == 0)
+		tag_type = MSG_ORDERED_TAG;
+	else if (strncmp(buf, "simple", 6) == 0)
+		tag_type = MSG_SIMPLE_TAG;
+	else if (strncmp(buf, "none", 4) != 0)
+		return -EINVAL;
+
+	if (tag_type == prev_tag_type)
+		return count;
+
+	retval = sht->change_queue_type(sdev, tag_type);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
+		   store_queue_type_field);
+
+#define sdev_vpd_pg_attr(_page)						\
+static ssize_t							\
+show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
+		 struct bin_attribute *bin_attr,			\
+		 char *buf, loff_t off, size_t count)			\
+{									\
+	struct device *dev = container_of(kobj, struct device, kobj);	\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	if (!sdev->vpd_##_page)						\
+		return -EINVAL;						\
+	return memory_read_from_buffer(buf, count, &off,		\
+				       sdev->vpd_##_page,		\
+				       sdev->vpd_##_page##_len);	\
+}									\
+static struct bin_attribute dev_attr_vpd_##_page = {			\
+	.attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
+	.size = 0,							\
+	.read = show_vpd_##_page,					\
+};
+
+sdev_vpd_pg_attr(pg83);
+sdev_vpd_pg_attr(pg80);
 
 static ssize_t
-show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf)
+show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
+			char *buf)
 {
 	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
 }
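
[Editor's note: the new vpd_pg80/vpd_pg83 bin attributes expose the raw
INQUIRY VPD pages as ordinary sysfs files. A userspace C sketch that
hex-dumps one of them; the H:C:T:L path below is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *path =
                    "/sys/class/scsi_device/0:0:0:0/device/vpd_pg83";
            unsigned char buf[255];
            size_t n, i;
            FILE *f = fopen(path, "rb");

            if (!f) {
                    perror("fopen");
                    return EXIT_FAILURE;
            }
            n = fread(buf, 1, sizeof(buf), f);
            fclose(f);

            for (i = 0; i < n; i++)     /* raw page 0x83, header included */
                    printf("%02x%c", buf[i], i % 16 == 15 ? '\n' : ' ');
            putchar('\n');
            return 0;
    }

The is_visible() callback added in the same series is what lets these
and the queue_* attributes live in one static group yet appear
read-only, or vanish, depending on host capabilities.]
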
@@ -786,46 +838,9 @@ DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
 DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
 DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
 
-/* Default template for device attributes.  May NOT be modified */
-static struct attribute *scsi_sdev_attrs[] = {
-	&dev_attr_device_blocked.attr,
-	&dev_attr_type.attr,
-	&dev_attr_scsi_level.attr,
-	&dev_attr_device_busy.attr,
-	&dev_attr_vendor.attr,
-	&dev_attr_model.attr,
-	&dev_attr_rev.attr,
-	&dev_attr_rescan.attr,
-	&dev_attr_delete.attr,
-	&dev_attr_state.attr,
-	&dev_attr_timeout.attr,
-	&dev_attr_eh_timeout.attr,
-	&dev_attr_iocounterbits.attr,
-	&dev_attr_iorequest_cnt.attr,
-	&dev_attr_iodone_cnt.attr,
-	&dev_attr_ioerr_cnt.attr,
-	&dev_attr_modalias.attr,
-	REF_EVT(media_change),
-	REF_EVT(inquiry_change_reported),
-	REF_EVT(capacity_change_reported),
-	REF_EVT(soft_threshold_reached),
-	REF_EVT(mode_parameter_change_reported),
-	REF_EVT(lun_change_reported),
-	NULL
-};
-
-static struct attribute_group scsi_sdev_attr_group = {
-	.attrs = scsi_sdev_attrs,
-};
-
-static const struct attribute_group *scsi_sdev_attr_groups[] = {
-	&scsi_sdev_attr_group,
-	NULL
-};
-
 static ssize_t
-sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
+sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
 {
 	int depth, retval;
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -848,10 +863,10 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
 
 	return count;
 }
+sdev_show_function(queue_depth, "%d\n");
 
-static struct device_attribute sdev_attr_queue_depth_rw =
-	__ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
-	       sdev_store_queue_depth_rw);
+static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
+		   sdev_store_queue_depth);
 
 static ssize_t
 sdev_show_queue_ramp_up_period(struct device *dev,
@@ -879,40 +894,79 @@ sdev_store_queue_ramp_up_period(struct device *dev,
 	return period;
 }
 
-static struct device_attribute sdev_attr_queue_ramp_up_period =
-	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
-	       sdev_show_queue_ramp_up_period,
-	       sdev_store_queue_ramp_up_period);
+static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+		   sdev_show_queue_ramp_up_period,
+		   sdev_store_queue_ramp_up_period);
 
-static ssize_t
-sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
-			 const char *buf, size_t count)
+static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
+					 struct attribute *attr, int i)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
 	struct scsi_device *sdev = to_scsi_device(dev);
-	struct scsi_host_template *sht = sdev->host->hostt;
-	int tag_type = 0, retval;
-	int prev_tag_type = scsi_get_tag_type(sdev);
 
-	if (!sdev->tagged_supported || !sht->change_queue_type)
-		return -EINVAL;
 
-	if (strncmp(buf, "ordered", 7) == 0)
-		tag_type = MSG_ORDERED_TAG;
-	else if (strncmp(buf, "simple", 6) == 0)
-		tag_type = MSG_SIMPLE_TAG;
-	else if (strncmp(buf, "none", 4) != 0)
-		return -EINVAL;
+	if (attr == &dev_attr_queue_depth.attr &&
+	    !sdev->host->hostt->change_queue_depth)
+		return S_IRUGO;
 
-	if (tag_type == prev_tag_type)
-		return count;
+	if (attr == &dev_attr_queue_ramp_up_period.attr &&
+	    !sdev->host->hostt->change_queue_depth)
+		return 0;
 
-	retval = sht->change_queue_type(sdev, tag_type);
-	if (retval < 0)
-		return retval;
+	if (attr == &dev_attr_queue_type.attr &&
+	    !sdev->host->hostt->change_queue_type)
+		return S_IRUGO;
 
-	return count;
+	return attr->mode;
 }
 
+/* Default template for device attributes.  May NOT be modified */
+static struct attribute *scsi_sdev_attrs[] = {
+	&dev_attr_device_blocked.attr,
+	&dev_attr_type.attr,
+	&dev_attr_scsi_level.attr,
+	&dev_attr_device_busy.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_model.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_rescan.attr,
+	&dev_attr_delete.attr,
+	&dev_attr_state.attr,
+	&dev_attr_timeout.attr,
+	&dev_attr_eh_timeout.attr,
+	&dev_attr_iocounterbits.attr,
+	&dev_attr_iorequest_cnt.attr,
+	&dev_attr_iodone_cnt.attr,
+	&dev_attr_ioerr_cnt.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_queue_depth.attr,
+	&dev_attr_queue_type.attr,
+	&dev_attr_queue_ramp_up_period.attr,
+	REF_EVT(media_change),
+	REF_EVT(inquiry_change_reported),
+	REF_EVT(capacity_change_reported),
+	REF_EVT(soft_threshold_reached),
+	REF_EVT(mode_parameter_change_reported),
+	REF_EVT(lun_change_reported),
+	NULL
+};
+
+static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+	&dev_attr_vpd_pg83,
+	&dev_attr_vpd_pg80,
+	NULL
+};
+static struct attribute_group scsi_sdev_attr_group = {
+	.attrs = scsi_sdev_attrs,
+	.bin_attrs = scsi_sdev_bin_attrs,
+	.is_visible = scsi_sdev_attr_is_visible,
+};
+
+static const struct attribute_group *scsi_sdev_attr_groups[] = {
+	&scsi_sdev_attr_group,
+	NULL
+};
+
 static int scsi_target_add(struct scsi_target *starget)
 {
 	int error;
@@ -935,10 +989,6 @@ static int scsi_target_add(struct scsi_target *starget)
 	return 0;
 }
 
-static struct device_attribute sdev_attr_queue_type_rw =
-	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
-	       sdev_store_queue_type_rw);
-
 /**
  * scsi_sysfs_add_sdev - add scsi device to sysfs
  * @sdev: scsi_device to add
@@ -992,25 +1042,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 	transport_add_device(&sdev->sdev_gendev);
 	sdev->is_visible = 1;
 
-	/* create queue files, which may be writable, depending on the host */
-	if (sdev->host->hostt->change_queue_depth) {
-		error = device_create_file(&sdev->sdev_gendev,
-					   &sdev_attr_queue_depth_rw);
-		error = device_create_file(&sdev->sdev_gendev,
-					   &sdev_attr_queue_ramp_up_period);
-	}
-	else
-		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
-	if (error)
-		return error;
-
-	if (sdev->host->hostt->change_queue_type)
-		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
-	else
-		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
-	if (error)
-		return error;
-
 	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
 	if (error)
@@ -1060,6 +1091,13 @@ void __scsi_remove_device(struct scsi_device *sdev)
 		sdev->host->hostt->slave_destroy(sdev);
 	transport_destroy_device(dev);
 
+	/*
+	 * Paired with the kref_get() in scsi_sysfs_initialize().  We have
+	 * removed sysfs visibility from the device, so make the target
+	 * invisible if this was the last device underneath it.
+	 */
+	scsi_target_reap(scsi_target(sdev));
+
 	put_device(dev);
 }
 
@@ -1122,7 +1160,7 @@ void scsi_remove_target(struct device *dev)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			/* assuming new targets arrive at the end */
-			starget->reap_ref++;
+			kref_get(&starget->reap_ref);
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			if (last)
 				scsi_target_reap(last);
@@ -1206,6 +1244,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
 	list_add_tail(&sdev->same_target_siblings, &starget->devices);
 	list_add_tail(&sdev->siblings, &shost->__devices);
 	spin_unlock_irqrestore(shost->host_lock, flags);
+	/*
+	 * device can now only be removed via __scsi_remove_device() so hold
+	 * the target.  Target will be held in CREATED state until something
+	 * beneath it becomes visible (in which case it moves to RUNNING)
+	 */
+	kref_get(&starget->reap_ref);
 }
 
 int scsi_is_sdev_device(const struct device *dev)
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 84a1fdf67864..e51add05fb8d 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -155,7 +155,8 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 	__blk_put_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	__scsi_put_command(shost, cmd, &shost->shost_gendev);
+	__scsi_put_command(shost, cmd);
+	put_device(&shost->shost_gendev);
 }
 EXPORT_SYMBOL_GPL(scsi_host_put_command);
 
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4628fd5e0688..f80908f74ca9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -261,6 +261,7 @@ static const struct {
 	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
 	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
 	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
+	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },
 	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
 };
 fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 470954aba728..89e6c04ac595 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1463,8 +1463,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 		sd_print_sense_hdr(sdkp, &sshdr);
 		/* we need to evaluate the error return  */
 		if (scsi_sense_valid(&sshdr) &&
-			/* 0x3a is medium not present */
-			sshdr.asc == 0x3a)
+			(sshdr.asc == 0x3a ||	/* medium not present */
+			 sshdr.asc == 0x20))	/* invalid command */
 				/* this is no error here */
 				return 0;
 
@@ -2281,7 +2281,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
-		sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
+		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
 		return;
 	}
 
@@ -2313,7 +2313,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 	}
 
 	if (!scsi_status_is_good(res)) {
-		sd_printk(KERN_WARNING, sdkp,
+		sd_first_printk(KERN_WARNING, sdkp,
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
@@ -2381,7 +2381,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	if (!data.header_length) {
 		modepage = 6;
 		first_len = 0;
-		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Missing header in MODE_SENSE response\n");
 	}
 
 	/* that went OK, now ask for the proper length */
@@ -2394,7 +2395,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	if (len < 3)
 		goto bad_sense;
 	else if (len > SD_BUF_SIZE) {
-		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
 			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
 		len = SD_BUF_SIZE;
 	}
@@ -2417,8 +2418,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 		/* We're interested only in the first 3 bytes.
 		 */
 		if (len - offset <= 2) {
-			sd_printk(KERN_ERR, sdkp, "Incomplete "
-				  "mode parameter data\n");
+			sd_first_printk(KERN_ERR, sdkp,
+					"Incomplete mode parameter "
+					"data\n");
 			goto defaults;
 		} else {
 			modepage = page_code;
@@ -2432,14 +2434,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 			else if (!spf && len - offset > 1)
 				offset += 2 + buffer[offset+1];
 			else {
-				sd_printk(KERN_ERR, sdkp, "Incomplete "
-					  "mode parameter data\n");
+				sd_first_printk(KERN_ERR, sdkp,
+						"Incomplete mode "
+						"parameter data\n");
 				goto defaults;
 			}
 		}
 	}
 
-	sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+	sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
 	goto defaults;
 
 Page_found:
@@ -2453,7 +2456,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 
 	sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
 	if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
-		sd_printk(KERN_NOTICE, sdkp,
+		sd_first_printk(KERN_NOTICE, sdkp,
 			  "Uses READ/WRITE(6), disabling FUA\n");
 		sdkp->DPOFUA = 0;
 	}
@@ -2475,16 +2478,19 @@ bad_sense:
 	    sshdr.sense_key == ILLEGAL_REQUEST &&
 	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
 		/* Invalid field in CDB */
-		sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
+		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
 	else
-		sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Asking for cache data failed\n");
 
 defaults:
 	if (sdp->wce_default_on) {
-		sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
+		sd_first_printk(KERN_NOTICE, sdkp,
+				"Assuming drive cache: write back\n");
 		sdkp->WCE = 1;
 	} else {
-		sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Assuming drive cache: write through\n");
 		sdkp->WCE = 0;
 	}
 	sdkp->RCD = 0;
@@ -2513,7 +2519,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 
 	if (!scsi_status_is_good(res) || !data.header_length ||
 	    data.length < 6) {
-		sd_printk(KERN_WARNING, sdkp,
+		sd_first_printk(KERN_WARNING, sdkp,
 			  "getting Control mode page failed, assume no ATO\n");
 
 		if (scsi_sense_valid(&sshdr))
@@ -2525,7 +2531,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 	offset = data.header_length + data.block_descriptor_length;
 
 	if ((buffer[offset] & 0x3f) != 0x0a) {
-		sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
 		return;
 	}
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 26895ff247c5..620871efbf0a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -104,6 +104,12 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
 			    (sdsk)->disk->disk_name, ##a) :		\
 	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
 
+#define sd_first_printk(prefix, sdsk, fmt, a...)			\
+	do {								\
+		if ((sdkp)->first_scan)					\
+			sd_printk(prefix, sdsk, fmt, ##a);		\
+	} while (0)
+
 static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
 {
 	switch (scmd->cmnd[0]) {
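
[Editor's note: one quirk worth flagging in the macro above: its
parameter is named sdsk, yet the condition tests (sdkp)->first_scan.
That only compiles because every call site in the sd.c hunks passes a
local variable literally named sdkp. A hygienic variant would use the
parameter itself, e.g.:

    /* hypothetical cleanup, not the committed macro */
    #define sd_first_printk(prefix, sdsk, fmt, a...)            \
            do {                                                \
                    if ((sdsk)->first_scan)                     \
                            sd_printk(prefix, sdsk, fmt, ##a);  \
            } while (0)

Either way, the effect is that these probe-time diagnostics are printed
only on the first scan of a disk rather than on every revalidation.]
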
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c428cf..80bfece1a2de 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/enclosure.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -448,27 +449,18 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
 static void ses_match_to_enclosure(struct enclosure_device *edev,
 				   struct scsi_device *sdev)
 {
-	unsigned char *buf;
 	unsigned char *desc;
-	unsigned int vpd_len;
 	struct efd efd = {
 		.addr = 0,
 	};
 
-	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
-	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
-		goto free;
-
 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
-	kfree(buf);
-	buf = kmalloc(vpd_len, GFP_KERNEL);
-	if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
-		goto free;
+	if (!sdev->vpd_pg83_len)
+		return;
 
-	desc = buf + 4;
-	while (desc < buf + vpd_len) {
+	desc = sdev->vpd_pg83 + 4;
+	while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
 		enum scsi_protocol proto = desc[0] >> 4;
 		u8 code_set = desc[0] & 0x0f;
 		u8 piv = desc[1] & 0x80;
@@ -478,25 +470,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
 		if (piv && code_set == 1 && assoc == 1
 		    && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
-			efd.addr = (u64)desc[4] << 56 |
-				   (u64)desc[5] << 48 |
-				   (u64)desc[6] << 40 |
-				   (u64)desc[7] << 32 |
-				   (u64)desc[8] << 24 |
-				   (u64)desc[9] << 16 |
-				   (u64)desc[10] << 8 |
-				   (u64)desc[11];
+			efd.addr = get_unaligned_be64(&desc[4]);
 
 		desc += len + 4;
 	}
-	if (!efd.addr)
-		goto free;
+	if (efd.addr) {
+		efd.dev = &sdev->sdev_gendev;
 
-	efd.dev = &sdev->sdev_gendev;
-
-	enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
-free:
-	kfree(buf);
+		enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+	}
 }
 
 static int ses_intf_add(struct device *cdev,
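
[Editor's note: the ses.c hunk swaps an eight-term shift-and-OR chain
for get_unaligned_be64(), which decodes a big-endian 64-bit SAS address
from a byte stream with no alignment requirement. In isolation (kernel
context assumed; desc points at a designation descriptor as in the
hunk):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u64 decode_sas_addr(const unsigned char *desc)
    {
            /* bytes 4..11 of the descriptor, big-endian; equivalent
             * to ((u64)desc[4] << 56) | ... | (u64)desc[11] */
            return get_unaligned_be64(&desc[4]);
    }
]
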
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a1d6986261a3..afc834e172c6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2198,12 +2198,19 @@ static int st_set_options(struct scsi_tape *STp, long options)
 	struct st_modedef *STm;
 	char *name = tape_name(STp);
 	struct cdev *cd0, *cd1;
+	struct device *d0, *d1;
 
 	STm = &(STp->modes[STp->current_mode]);
 	if (!STm->defined) {
-		cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1];
+		cd0 = STm->cdevs[0];
+		cd1 = STm->cdevs[1];
+		d0 = STm->devs[0];
+		d1 = STm->devs[1];
 		memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
-		STm->cdevs[0] = cd0; STm->cdevs[1] = cd1;
+		STm->cdevs[0] = cd0;
+		STm->cdevs[1] = cd1;
+		STm->devs[0] = d0;
+		STm->devs[1] = d1;
 		modes_defined = 1;
 		DEBC(printk(ST_DEB_MSG
                             "%s: Initialized mode %d definition from mode 0\n",
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index f1e4b4148c75..a4abce9d526e 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -259,7 +259,7 @@ found:
 	instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
 
 	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
+		if (request_irq(instance->irq, t128_intr, 0, "t128",
 				instance)) {
 			printk("scsi%d : IRQ%d not free, interrupts disabled\n",
 			       instance->host_no, instance->irq);
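Note: this t128 hunk (and the u14-34f and wd7000 hunks below) belongs to the tree-wide "remove deprecated IRQF_DISABLED" cleanup listed in the merge summary. The flag has been a no-op since the genirq core started running all handlers with local interrupts disabled, so passing 0, or only the flags that still change behavior such as IRQF_SHARED, is equivalent. A hypothetical probe fragment showing the resulting call shape (driver name and handler are placeholders):

	#include <linux/interrupt.h>

	/* IRQF_DISABLED dropped; handlers run with local IRQs off regardless.
	 * Only behavior-changing flags such as IRQF_SHARED are kept. */
	static int claim_irq(unsigned int irq, irq_handler_t handler,
			     bool shared, void *dev_id)
	{
		return request_irq(irq, handler, shared ? IRQF_SHARED : 0,
				   "mydrv", dev_id);
	}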
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 9c216e563568..5a03bb3bcfef 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -873,7 +873,7 @@ static int port_detect \
 
    /* Board detected, allocate its IRQ */
    if (request_irq(irq, do_interrupt_handler,
-                   IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+                   (subversion == ESA) ? IRQF_SHARED : 0,
                    driver_name, (void *) &sha[j])) {
       printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
       goto freelock;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9755ec0e812..c88e1468aad7 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's para-virtualized SCSI HBA.
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -32,6 +32,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
 
 #include "vmw_pvscsi.h"
 
@@ -44,7 +45,7 @@ MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
 
 #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
-#define PVSCSI_DEFAULT_QUEUE_DEPTH		64
+#define PVSCSI_DEFAULT_QUEUE_DEPTH		254
 #define SGL_SIZE				PAGE_SIZE
 
 struct pvscsi_sg_list {
@@ -62,6 +63,7 @@ struct pvscsi_ctx {
 	dma_addr_t		dataPA;
 	dma_addr_t		sensePA;
 	dma_addr_t		sglPA;
+	struct completion	*abort_cmp;
 };
 
 struct pvscsi_adapter {
@@ -71,6 +73,7 @@ struct pvscsi_adapter {
 	bool				use_msi;
 	bool				use_msix;
 	bool				use_msg;
+	bool				use_req_threshold;
 
 	spinlock_t			hw_lock;
 
@@ -102,18 +105,22 @@ struct pvscsi_adapter {
 
 
 /* Command line parameters */
-static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_ring_pages;
 static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
 static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
 static bool pvscsi_disable_msi;
 static bool pvscsi_disable_msix;
 static bool pvscsi_use_msg       = true;
+static bool pvscsi_use_req_threshold = true;
 
 #define PVSCSI_RW (S_IRUSR | S_IWUSR)
 
 module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
 MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
-		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
+		 "[up to 16 targets],"
+		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
+		 "[for 16+ targets])");
 
 module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
 MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
@@ -121,7 +128,7 @@ MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
 
 module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
 MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
-		 __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
+		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
 
 module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
 MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -132,6 +139,10 @@ MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
 module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
 MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
 
+module_param_named(use_req_threshold, pvscsi_use_req_threshold,
+		   bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
+
 static const struct pci_device_id pvscsi_pci_tbl[] = {
 	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
 	{ 0 }
@@ -177,6 +188,7 @@ static void pvscsi_release_context(struct pvscsi_adapter *adapter,
 				   struct pvscsi_ctx *ctx)
 {
 	ctx->cmd = NULL;
+	ctx->abort_cmp = NULL;
 	list_add(&ctx->list, &adapter->cmd_pool);
 }
 
@@ -280,10 +292,15 @@ static int scsi_is_rw(unsigned char op)
 static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
 			   unsigned char op)
 {
-	if (scsi_is_rw(op))
-		pvscsi_kick_rw_io(adapter);
-	else
+	if (scsi_is_rw(op)) {
+		struct PVSCSIRingsState *s = adapter->rings_state;
+
+		if (!adapter->use_req_threshold ||
+		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
+			pvscsi_kick_rw_io(adapter);
+	} else {
 		pvscsi_process_request_ring(adapter);
+	}
 }
 
 static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
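Note: the reworked pvscsi_kick_io() above is the heart of the I/O-coalescing feature advertised in the merge summary. For READ/WRITE commands the driver now rings the doorbell (pvscsi_kick_rw_io()) only once the number of requests outstanding in the ring, reqProdIdx - reqConsIdx, reaches the reqCallThreshold the device published in the shared rings state; below that, the doorbell write and the VM exit it causes are skipped, since the device will pick up the queued request on its next pass anyway. A condensed sketch of just that decision, with a hypothetical snapshot type mirroring the shared fields:

	#include <stdbool.h>
	#include <stdint.h>

	struct ring_state {			/* mirrors the shared PVSCSIRingsState fields */
		uint32_t req_prod_idx;		/* driver bumps this when queueing */
		uint32_t req_cons_idx;		/* device bumps this when consuming */
		uint32_t req_call_threshold;	/* device-configured coalescing depth */
	};

	/* Ring the doorbell only when coalescing is off or enough requests
	 * have accumulated; unsigned subtraction handles index wraparound. */
	static bool should_kick(const struct ring_state *s, bool use_threshold)
	{
		return !use_threshold ||
		       s->req_prod_idx - s->req_cons_idx >= s->req_call_threshold;
	}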
@@ -487,6 +504,35 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
 	}
 }
 
+static int pvscsi_change_queue_depth(struct scsi_device *sdev,
+				     int qdepth,
+				     int reason)
+{
+	int max_depth;
+	struct Scsi_Host *shost = sdev->host;
+
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		/*
+		 * We support only changing default.
+		 */
+		return -EOPNOTSUPP;
+
+	max_depth = shost->can_queue;
+	if (!sdev->tagged_supported)
+		max_depth = 1;
+	if (qdepth > max_depth)
+		qdepth = max_depth;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+	if (sdev->inquiry_len > 7)
+		sdev_printk(KERN_INFO, sdev,
+			    "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+			    sdev->queue_depth, sdev->tagged_supported,
+			    sdev->simple_tags, sdev->ordered_tags,
+			    sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
+	return sdev->queue_depth;
+}
+
 /*
  * Pull a completion descriptor off and pass the completion back
  * to the SCSI mid layer.
@@ -496,15 +542,27 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 {
 	struct pvscsi_ctx *ctx;
 	struct scsi_cmnd *cmd;
+	struct completion *abort_cmp;
 	u32 btstat = e->hostStatus;
 	u32 sdstat = e->scsiStatus;
 
 	ctx = pvscsi_get_context(adapter, e->context);
 	cmd = ctx->cmd;
+	abort_cmp = ctx->abort_cmp;
 	pvscsi_unmap_buffers(adapter, ctx);
 	pvscsi_release_context(adapter, ctx);
-	cmd->result = 0;
+	if (abort_cmp) {
+		/*
+		 * The command was requested to be aborted. Just signal that
+		 * the request completed and swallow the actual cmd completion
+		 * here. The abort handler will post a completion for this
+		 * command indicating that it got successfully aborted.
+		 */
+		complete(abort_cmp);
+		return;
+	}
 
+	cmd->result = 0;
 	if (sdstat != SAM_STAT_GOOD &&
 	    (btstat == BTSTAT_SUCCESS ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
@@ -726,6 +784,8 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
 	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
 	struct pvscsi_ctx *ctx;
 	unsigned long flags;
+	int result = SUCCESS;
+	DECLARE_COMPLETION_ONSTACK(abort_cmp);
 
 	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
 		    adapter->host->host_no, cmd);
@@ -748,13 +808,40 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	/*
+	 * Mark that the command has been requested to be aborted and issue
+	 * the abort.
+	 */
+	ctx->abort_cmp = &abort_cmp;
+
 	pvscsi_abort_cmd(adapter, ctx);
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	/* Wait for 2 secs for the completion. */
+	wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+	spin_lock_irqsave(&adapter->hw_lock, flags);
 
-	pvscsi_process_completion_ring(adapter);
+	if (!completion_done(&abort_cmp)) {
+		/*
+		 * Failed to abort the command, unmark the fact that it
+		 * was requested to be aborted.
+		 */
+		ctx->abort_cmp = NULL;
+		result = FAILED;
+		scmd_printk(KERN_DEBUG, cmd,
+			    "Failed to get completion for aborted cmd %p\n",
+			    cmd);
+		goto out;
+	}
+
+	/*
+	 * Successfully aborted the command.
+	 */
+	cmd->result = (DID_ABORT << 16);
+	cmd->scsi_done(cmd);
 
 out:
 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
-	return SUCCESS;
+	return result;
 }
 
 /*
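Note: the abort rework above turns a fire-and-forget abort into a handshake. The handler plants an on-stack completion in the command's context, issues the abort, drops the hw_lock so the ISR can drain the completion ring, and waits up to two seconds; pvscsi_complete_request() signals that completion instead of finishing the command, and only then does the abort handler complete the command itself with DID_ABORT. On timeout it unhooks the completion and returns FAILED so the midlayer can escalate. A stripped-down sketch of the rendezvous, with hypothetical ctx and doorbell names:

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/spinlock.h>

	struct ctx {
		struct completion *abort_cmp;	/* set while an abort is pending */
	};

	static void hw_issue_abort(struct ctx *ctx)	/* stand-in for the doorbell write */
	{
	}

	/* Abort side: publish the completion, kick the abort, wait unlocked. */
	static int abort_with_handshake(struct ctx *ctx, spinlock_t *lock)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(lock, flags);
		ctx->abort_cmp = &done;		/* visible to the IRQ path */
		hw_issue_abort(ctx);
		spin_unlock_irqrestore(lock, flags);

		wait_for_completion_timeout(&done, msecs_to_jiffies(2000));

		spin_lock_irqsave(lock, flags);
		if (!completion_done(&done)) {	/* device never answered */
			ctx->abort_cmp = NULL;
			ret = -ETIMEDOUT;
		}
		spin_unlock_irqrestore(lock, flags);
		return ret;
	}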
@@ -911,6 +998,7 @@ static struct scsi_host_template pvscsi_template = {
 	.dma_boundary			= UINT_MAX,
 	.max_sectors			= 0xffff,
 	.use_clustering			= ENABLE_CLUSTERING,
+	.change_queue_depth		= pvscsi_change_queue_depth,
 	.eh_abort_handler		= pvscsi_abort,
 	.eh_device_reset_handler	= pvscsi_device_reset,
 	.eh_bus_reset_handler		= pvscsi_bus_reset,
@@ -1034,6 +1122,34 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
 	return 1;
 }
 
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+				       bool enable)
+{
+	u32 val;
+
+	if (!pvscsi_use_req_threshold)
+		return false;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+	if (val == -1) {
+		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+		return false;
+	} else {
+		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+		cmd_msg.enable = enable;
+		printk(KERN_INFO
+		       "vmw_pvscsi: %sabling reqCallThreshold\n",
+		       enable ? "en" : "dis");
+		pvscsi_write_cmd_desc(adapter,
+				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+				      &cmd_msg, sizeof(cmd_msg));
+		return pvscsi_reg_read(adapter,
+				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+	}
+}
+
 static irqreturn_t pvscsi_isr(int irq, void *devp)
 {
 	struct pvscsi_adapter *adapter = devp;
@@ -1236,11 +1352,12 @@ exit:
 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct pvscsi_adapter *adapter;
-	struct Scsi_Host *host;
-	struct device *dev;
+	struct pvscsi_adapter adapter_temp;
+	struct Scsi_Host *host = NULL;
 	unsigned int i;
 	unsigned long flags = 0;
 	int error;
+	u32 max_id;
 
 	error = -ENODEV;
 
@@ -1258,34 +1375,19 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_disable_device;
 	}
 
-	pvscsi_template.can_queue =
-		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
-		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	pvscsi_template.cmd_per_lun =
-		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
-	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
-	if (!host) {
-		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
-		goto out_disable_device;
-	}
-
-	adapter = shost_priv(host);
+	/*
+	 * Let's use a temp pvscsi_adapter struct until we find the number of
+	 * targets on the adapter, after that we will switch to the real
+	 * allocated struct.
+	 */
+	adapter = &adapter_temp;
 	memset(adapter, 0, sizeof(*adapter));
 	adapter->dev  = pdev;
-	adapter->host = host;
-
-	spin_lock_init(&adapter->hw_lock);
-
-	host->max_channel = 0;
-	host->max_id      = 16;
-	host->max_lun     = 1;
-	host->max_cmd_len = 16;
-
 	adapter->rev = pdev->revision;
 
 	if (pci_request_regions(pdev, "vmw_pvscsi")) {
 		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
-		goto out_free_host;
+		goto out_disable_device;
 	}
 
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1301,7 +1403,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (i == DEVICE_COUNT_RESOURCE) {
 		printk(KERN_ERR
 		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
 	}
 
 	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
@@ -1310,10 +1412,60 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		printk(KERN_ERR
 		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
 		       i, PVSCSI_MEM_SPACE_SIZE);
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
 	}
 
 	pci_set_master(pdev);
+
+	/*
+	 * Ask the device for max number of targets before deciding the
+	 * default pvscsi_ring_pages value.
+	 */
+	max_id = pvscsi_get_max_targets(adapter);
+	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+	if (pvscsi_ring_pages == 0)
+		/*
+		 * Set the right default value. Up to 16 it is 8, above it is
+		 * max.
+		 */
+		pvscsi_ring_pages = (max_id > 16) ?
+			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+	printk(KERN_INFO
+	       "vmw_pvscsi: setting ring_pages to %d\n",
+	       pvscsi_ring_pages);
+
+	pvscsi_template.can_queue =
+		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	pvscsi_template.cmd_per_lun =
+		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+	if (!host) {
+		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+		goto out_release_resources_and_disable;
+	}
+
+	/*
+	 * Let's use the real pvscsi_adapter struct here onwards.
+	 */
+	adapter = shost_priv(host);
+	memset(adapter, 0, sizeof(*adapter));
+	adapter->dev  = pdev;
+	adapter->host = host;
+	/*
+	 * Copy back what we already have to the allocated adapter struct.
+	 */
+	adapter->rev = adapter_temp.rev;
+	adapter->mmioBase = adapter_temp.mmioBase;
+
+	spin_lock_init(&adapter->hw_lock);
+	host->max_channel = 0;
+	host->max_lun     = 1;
+	host->max_cmd_len = 16;
+	host->max_id      = max_id;
+
 	pci_set_drvdata(pdev, host);
 
 	ll_adapter_reset(adapter);
@@ -1327,13 +1479,6 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	/*
-	 * Ask the device for max number of targets.
-	 */
-	host->max_id = pvscsi_get_max_targets(adapter);
-	dev = pvscsi_dev(adapter);
-	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
-
-	/*
 	 * From this point on we should reset the adapter if anything goes
 	 * wrong.
 	 */
@@ -1373,6 +1518,10 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		flags = IRQF_SHARED;
 	}
 
+	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+	       adapter->use_req_threshold ? "en" : "dis");
+
 	error = request_irq(adapter->irq, pvscsi_isr, flags,
 			    "vmw_pvscsi", adapter);
 	if (error) {
@@ -1402,12 +1551,15 @@ out_reset_adapter:
 	ll_adapter_reset(adapter);
 out_release_resources:
 	pvscsi_release_resources(adapter);
-out_free_host:
 	scsi_host_put(host);
 out_disable_device:
 	pci_disable_device(pdev);
 
 	return error;
+
+out_release_resources_and_disable:
+	pvscsi_release_resources(adapter);
+	goto out_disable_device;
 }
 
 static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 3546e8662e30..ce4588851274 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -1,7 +1,7 @@
 /*
  * VMware PVSCSI header file
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-#define PVSCSI_DRIVER_VERSION_STRING   "1.0.2.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.5.0-k"
 
 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
 
@@ -117,8 +117,9 @@ enum PVSCSICommands {
 	PVSCSI_CMD_CONFIG            = 7,
 	PVSCSI_CMD_SETUP_MSG_RING    = 8,
 	PVSCSI_CMD_DEVICE_UNPLUG     = 9,
+	PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10,
 
-	PVSCSI_CMD_LAST              = 10  /* has to be last */
+	PVSCSI_CMD_LAST              = 11  /* has to be last */
 };
 
 /*
@@ -141,6 +142,14 @@ struct PVSCSICmdDescConfigCmd {
 	u32 _pad;
 } __packed;
 
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+	u32 enable;
+} __packed;
+
 enum PVSCSIConfigPageType {
 	PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
 	PVSCSI_CONFIG_PAGE_PHY        = 0x1959,
@@ -261,7 +270,9 @@ struct PVSCSIRingsState {
 	u32	cmpConsIdx;
 	u32	cmpNumEntriesLog2;
 
-	u8	_pad[104];
+	u32	reqCallThreshold;
+
+	u8	_pad[100];
 
 	u32	msgProdIdx;
 	u32	msgConsIdx;
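Note: the header hunk above carves the new reqCallThreshold field out of existing padding (104 pad bytes become one u32 plus 100 pad bytes), so the offsets of the message-ring fields that follow, and the overall size of the shared PVSCSIRingsState layout, are unchanged and old and new sides still agree on the ABI. A compile-time check in the same spirit (hypothetical, not part of the patch):

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct old_tail { u8 _pad[104]; u32 msgProdIdx; };
	struct new_tail { u32 reqCallThreshold; u8 _pad[100]; u32 msgProdIdx; };

	static inline void check_abi(void)
	{
		/* Replacing 4 pad bytes with a u32 must not move anything. */
		BUILD_BUG_ON(sizeof(struct old_tail) != sizeof(struct new_tail));
		BUILD_BUG_ON(offsetof(struct old_tail, msgProdIdx) !=
			     offsetof(struct new_tail, msgProdIdx));
	}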
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f9a6e4b0affe..32674236fec7 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@ static int wd7000_init(Adapter * host)
 		return 0;
 
 
-	if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+	if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
 		printk("wd7000_init: can't get IRQ %d.\n", host->irq);
 		return (0);
 	}