path: root/drivers/scsi
author		Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:43:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:43:10 -0500
commit		590cf28580c999c8ba70dc39b40bab09d69e2630 (patch)
tree		22b9aa4b148bea8a310b760521d1032eef7d743f /drivers/scsi
parent		f54a6ec0fd85002d94d05b4bb679508eeb066683 (diff)
parent		fb5edd020fa0fbe991f4a473611ad530d2237425 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (104 commits)
  [SCSI] fcoe: fix configuration problems
  [SCSI] cxgb3i: fix select/depend problem
  [SCSI] fcoe: fix incorrect use of struct module
  [SCSI] cxgb3i: remove use of skb->sp
  [SCSI] cxgb3i: Add cxgb3i iSCSI driver.
  [SCSI] zfcp: Remove unnecessary warning message
  [SCSI] zfcp: Add support for unchained FSF requests
  [SCSI] zfcp: Remove busid macro
  [SCSI] zfcp: remove DID_DID flag
  [SCSI] zfcp: Simplify mask lookups for incoming RSCNs
  [SCSI] zfcp: Remove initial device data from zfcp_data
  [SCSI] zfcp: fix compile warning
  [SCSI] zfcp: Remove adapter list
  [SCSI] zfcp: Simplify SBAL allocation to fix sparse warnings
  [SCSI] zfcp: register with SCSI layer on ccw registration
  [SCSI] zfcp: Fix message line break
  [SCSI] qla2xxx: changes in multiq code
  [SCSI] eata: fix the data buffer accessors conversion regression
  [SCSI] ibmvfc: Improve async event handling
  [SCSI] lpfc : correct printk types on PPC compiles
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 22
-rw-r--r--  drivers/scsi/Makefile | 5
-rw-r--r--  drivers/scsi/NCR5380.c | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 14
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 2
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 2
-rw-r--r--  drivers/scsi/aacraid/rx.c | 2
-rw-r--r--  drivers/scsi/aacraid/sa.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 3
-rw-r--r--  drivers/scsi/aha1740.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 8
-rw-r--r--  drivers/scsi/atp870u.c | 4
-rw-r--r--  drivers/scsi/ch.c | 2
-rw-r--r--  drivers/scsi/cxgb3i/Kbuild | 4
-rw-r--r--  drivers/scsi/cxgb3i/Kconfig | 7
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i.h | 139
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.c | 770
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.h | 306
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_init.c | 107
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 951
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c | 1810
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.h | 231
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.c | 402
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.h | 59
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 18
-rw-r--r--  drivers/scsi/eata.c | 15
-rw-r--r--  drivers/scsi/eata_pio.c | 4
-rw-r--r--  drivers/scsi/esp_scsi.c | 6
-rw-r--r--  drivers/scsi/fcoe/Makefile | 8
-rw-r--r--  drivers/scsi/fcoe/fc_transport_fcoe.c | 446
-rw-r--r--  drivers/scsi/fcoe/fcoe_sw.c | 494
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c | 1510
-rw-r--r--  drivers/scsi/fdomain.c | 2
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 293
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 32
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 4
-rw-r--r--  drivers/scsi/in2000.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/initio.h | 2
-rw-r--r--  drivers/scsi/ipr.c | 6
-rw-r--r--  drivers/scsi/ipr.h | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 1621
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 88
-rw-r--r--  drivers/scsi/libfc/Makefile | 12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 845
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 71
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 1970
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2131
-rw-r--r--  drivers/scsi/libfc/fc_frame.c | 89
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 1604
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 1291
-rw-r--r--  drivers/scsi/libiscsi.c | 236
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 1163
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 169
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 164
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 160
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 69
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 249
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 685
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 54
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1235
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 159
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 28
-rw-r--r--  drivers/scsi/mac_esp.c | 100
-rw-r--r--  drivers/scsi/nsp32.c | 3
-rw-r--r--  drivers/scsi/qla1280.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 328
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 96
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 584
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 56
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 481
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 1251
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 45
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 358
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 840
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 886
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 516
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1471
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 554
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qlogicfas408.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 6
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 6
-rw-r--r--  drivers/scsi/scsi_lib.c | 149
-rw-r--r--  drivers/scsi/scsi_scan.c | 17
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 13
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 5
-rw-r--r--  drivers/scsi/sd.c | 11
-rw-r--r--  drivers/scsi/ses.c | 9
-rw-r--r--  drivers/scsi/sr.c | 4
-rw-r--r--  drivers/scsi/sr_ioctl.c | 2
-rw-r--r--  drivers/scsi/st.c | 245
-rw-r--r--  drivers/scsi/stex.c | 3
-rw-r--r--  drivers/scsi/sym53c416.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 3
-rw-r--r--  drivers/scsi/u14-34f.c | 3
-rw-r--r--  drivers/scsi/wd7000.c | 4
115 files changed, 24604 insertions, 5406 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 403ecad48d4b..152d4aa9354f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -352,6 +352,8 @@ config ISCSI_TCP
 
 	  http://open-iscsi.org
 
+source "drivers/scsi/cxgb3i/Kconfig"
+
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
 	depends on SGI_HAS_WD93 && SCSI
@@ -603,6 +605,19 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config LIBFC
+	tristate "LibFC module"
+	select SCSI_FC_ATTRS
+	---help---
+	  Fibre Channel library module
+
+config FCOE
+	tristate "FCoE module"
+	depends on PCI
+	select LIBFC
+	---help---
+	  Fibre Channel over Ethernet module
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
@@ -1357,6 +1372,13 @@ config SCSI_LPFC
 	  This lpfc driver supports the Emulex LightPulse
 	  Family of Fibre Channel PCI host adapters.
 
+config SCSI_LPFC_DEBUG_FS
+	bool "Emulex LightPulse Fibre Channel debugfs Support"
+	depends on SCSI_LPFC && DEBUG_FS
+	help
+	  This makes debugging information from the lpfc driver
+	  available via the debugfs filesystem.
+
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
 	depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd5043cfa1..1410697257cb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
 obj-$(CONFIG_SCSI_DH)		+= device_handler/
 
-obj-$(CONFIG_ISCSI_TCP)		+= libiscsi.o iscsi_tcp.o
+obj-$(CONFIG_LIBFC)		+= libfc/
+obj-$(CONFIG_FCOE)		+= fcoe/
+obj-$(CONFIG_ISCSI_TCP)		+= libiscsi.o libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o a4000t.o
 obj-$(CONFIG_SCSI_ZORRO7XX)	+= 53c700.o zorro7xx.o
@@ -124,6 +126,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index eeddbd19eba5..f92da9fd5f20 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -30,7 +30,7 @@
  * $Log: NCR5380.c,v $
 
  * Revision 1.10 1998/9/2	Alan Cox
- *	(alan@redhat.com)
+ *	(alan@lxorguk.ukuu.org.uk)
  * Fixed up the timer lockups reported so far. Things still suck. Looking
  * forward to 2.3 and per device request queues. Then it'll be possible to
  * SMP thread this beast and improve life no end.
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 84bb61628372..3c298c7253ee 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -54,7 +54,7 @@
  * 9/28/04 Christoph Hellwig <hch@lst.de>
  *	- merge the two source files
  *	- remove internal queueing code
- * 14/06/07 Alan Cox <alan@redhat.com>
+ * 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
  *	- Grand cleanup and Linuxisation
  */
 
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8abfd06b5a72..90d1d0878cb8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index a7355260cfcf..0391d759dfdb 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
@@ -90,14 +90,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 	if (size < le16_to_cpu(kfib->header.SenderSize))
 		size = le16_to_cpu(kfib->header.SenderSize);
 	if (size > dev->max_fib_size) {
+		dma_addr_t daddr;
+
 		if (size > 2048) {
 			retval = -EINVAL;
 			goto cleanup;
 		}
+
+		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+		if (!kfib) {
+			retval = -ENOMEM;
+			goto cleanup;
+		}
+
 		/* Highjack the hw_fib */
 		hw_fib = fibptr->hw_fib_va;
 		hw_fib_pa = fibptr->hw_fib_pa;
-		fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
+		fibptr->hw_fib_va = kfib;
+		fibptr->hw_fib_pa = daddr;
 		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
 		memcpy(kfib, hw_fib, dev->max_fib_size);
 	}
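
The reordering above is the substance of this hunk: the DMA buffer is now allocated and checked before the original FIB pointers are overwritten, so a failed allocation can no longer leave fibptr pointing at a NULL buffer. A minimal sketch of the same allocate-then-commit pattern, with hypothetical names (buf_ctx, buf_grow are illustrative, not from the driver):

#include <linux/pci.h>

struct buf_ctx {			/* hypothetical container */
	struct pci_dev *pdev;
	void *va;
	dma_addr_t pa;
};

static int buf_grow(struct buf_ctx *c, size_t size)
{
	dma_addr_t daddr;
	void *p = pci_alloc_consistent(c->pdev, size, &daddr);

	if (!p)
		return -ENOMEM;		/* old va/pa are still intact */
	c->va = p;			/* commit only after success */
	c->pa = daddr;
	return 0;
}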
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index cbac06355107..16310443b55a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 289304aab690..d24c2670040b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 933f208eedba..abc9ef5d1b10 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 94acbeed4e7c..36d8aab97efe 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 8cd6588a83e3..16d8db550027 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 073208b0f622..f70d9f8e79e5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index fc1a55796a89..b6a3c5c187b6 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,6 +1,6 @@
 /*
  * Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
  *
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 399fe559e4de..2f602720193e 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -13425,8 +13425,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
 	}
 
 	boardp->asc_n_io_port = pci_resource_len(pdev, 1);
-	boardp->ioremap_addr = ioremap(pci_resource_start(pdev, 1),
-				       boardp->asc_n_io_port);
+	boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
 	if (!boardp->ioremap_addr) {
 		shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
 			     "returned NULL\n",
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 7c45d88a205b..ed0e3e55652a 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -22,7 +22,7 @@
  * aha1740_makecode may still need even more work
  * if it doesn't work for your devices, take a look.
  *
- * Reworked for new_eh and new locking by Alan Cox <alan@redhat.com>
+ * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Converted to EISA and generic DMA APIs by Marc Zyngier
  * <maz@wild-wind.fr.eu.org>, 4/2003.
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f91f79c8007d..106c04d2d793 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -235,7 +235,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 	uint32_t intmask_org;
 	int i, j;
 
-	acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+	acb->pmuA = pci_ioremap_bar(pdev, 0);
 	if (!acb->pmuA) {
 		printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
 			acb->host->host_no);
@@ -329,13 +329,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 		reg = (struct MessageUnit_B *)(dma_coherent +
 			ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
 		acb->pmuB = reg;
-		mem_base0 = ioremap(pci_resource_start(pdev, 0),
-					pci_resource_len(pdev, 0));
+		mem_base0 = pci_ioremap_bar(pdev, 0);
 		if (!mem_base0)
 			goto out;
 
-		mem_base1 = ioremap(pci_resource_start(pdev, 2),
-					pci_resource_len(pdev, 2));
+		mem_base1 = pci_ioremap_bar(pdev, 2);
 		if (!mem_base1) {
 			iounmap(mem_base0);
 			goto out;
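
Both arcmsr hunks, like the advansys one above, are conversions to the pci_ioremap_bar() helper. Assuming the helper behaves as the conversion implies, the call is shorthand for mapping the whole BAR by hand, roughly:

#include <linux/pci.h>
#include <linux/io.h>

/* a sketch of what pci_ioremap_bar(pdev, bar) replaces; the helper
 * centralizes the start/len lookup instead of open-coding it per driver */
static void __iomem *map_bar_legacy(struct pci_dev *pdev, int bar)
{
	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
}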
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7d311541c76c..20ca0a6374b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1,8 +1,8 @@
 /*
  *  Copyright (C) 1997	Wu Ching Chen
  *  2.1.x update (C) 1998  Krzysztof G. Baranowski
- *  2.5.x update (C) 2002  Red Hat <alan@redhat.com>
- *  2.6.x update (C) 2004  Red Hat <alan@redhat.com>
+ *  2.5.x update (C) 2002  Red Hat
+ *  2.6.x update (C) 2004  Red Hat
  *
  * Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
  *
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 88ecf94ad979..af9725409f43 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -190,7 +190,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
 
 	result = scsi_execute_req(ch->device, cmd, direction, buffer,
 				  buflength, &sshdr, timeout * HZ,
-				  MAX_RETRIES);
+				  MAX_RETRIES, NULL);
 
 	dprintk("result: 0x%x\n",result);
 	if (driver_byte(result) & DRIVER_SENSE) {
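
The added NULL corresponds to a new trailing out-parameter for the residual byte count that scsi_execute_req() gained in this series; callers that do not need it pass NULL. A hedged sketch of a caller that does collect it (the surrounding names come from ch.c above; the resid handling is illustrative):

int resid = 0;

result = scsi_execute_req(ch->device, cmd, direction, buffer,
			  buflength, &sshdr, timeout * HZ,
			  MAX_RETRIES, &resid);
/* if the transport filled it in, resid is the number of bytes
 * requested but not actually transferred */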
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
new file mode 100644
index 000000000000..ee7d6d2f9c3b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -0,0 +1,4 @@
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3

cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgb3i/Kconfig
new file mode 100644
index 000000000000..bfdcaf5c9c57
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kconfig
@@ -0,0 +1,7 @@
config SCSI_CXGB3_ISCSI
	tristate "Chelsio S3xx iSCSI support"
	depends on CHELSIO_T3_DEPENDS
	select CHELSIO_T3
	select SCSI_ISCSI_ATTRS
	---help---
	  This driver supports iSCSI offload for the Chelsio S3 series devices.
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..fde6e4c634e7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -0,0 +1,139 @@
/*
 * cxgb3i.h: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#ifndef __CXGB3I_H__
#define __CXGB3I_H__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/scatterlist.h>
#include <scsi/libiscsi_tcp.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_offload.h"
#include "cxgb3i_ddp.h"

#define CXGB3I_SCSI_QDEPTH_DFLT	128
#define CXGB3I_MAX_TARGET	CXGB3I_MAX_CONN
#define CXGB3I_MAX_LUN		512
#define ISCSI_PDU_NONPAYLOAD_MAX \
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)

struct cxgb3i_adapter;
struct cxgb3i_hba;
struct cxgb3i_endpoint;

/**
 * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
 *
 * @snic: cxgb3i adapter containing this port
 * @ndev: pointer to netdev structure
 * @shost: pointer to scsi host structure
 */
struct cxgb3i_hba {
	struct cxgb3i_adapter *snic;
	struct net_device *ndev;
	struct Scsi_Host *shost;
};

/**
 * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
 *
 * @listhead: list head to link elements
 * @lock: lock for this structure
 * @tdev: pointer to t3cdev used by cxgb3 driver
 * @pdev: pointer to pci dev
 * @hba_cnt: # of hbas (the same as # of ports)
 * @hba: all the hbas on this adapter
 * @tx_max_size: max. tx packet size supported
 * @rx_max_size: max. rx packet size supported
 * @tag_format: ddp tag format settings
 */
struct cxgb3i_adapter {
	struct list_head list_head;
	spinlock_t lock;
	struct t3cdev *tdev;
	struct pci_dev *pdev;
	unsigned char hba_cnt;
	struct cxgb3i_hba *hba[MAX_NPORTS];

	unsigned int tx_max_size;
	unsigned int rx_max_size;

	struct cxgb3i_tag_format tag_format;
};

/**
 * struct cxgb3i_conn - cxgb3i iscsi connection
 *
 * @listhead: list head to link elements
 * @cep: pointer to iscsi_endpoint structure
 * @conn: pointer to iscsi_conn structure
 * @hba: pointer to the hba this conn. is going through
 * @task_idx_bits: # of bits needed for session->cmds_max
 */
struct cxgb3i_conn {
	struct list_head list_head;
	struct cxgb3i_endpoint *cep;
	struct iscsi_conn *conn;
	struct cxgb3i_hba *hba;
	unsigned int task_idx_bits;
};

/**
 * struct cxgb3i_endpoint - iscsi tcp endpoint
 *
 * @c3cn: the h/w tcp connection representation
 * @hba: pointer to the hba this conn. is going through
 * @cconn: pointer to the associated cxgb3i iscsi connection
 */
struct cxgb3i_endpoint {
	struct s3_conn *c3cn;
	struct cxgb3i_hba *hba;
	struct cxgb3i_conn *cconn;
};

int cxgb3i_iscsi_init(void);
void cxgb3i_iscsi_cleanup(void);

struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
void cxgb3i_adapter_remove(struct t3cdev *);
int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);

struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
				       struct net_device *);
void cxgb3i_hba_host_remove(struct cxgb3i_hba *);

int cxgb3i_pdu_init(void);
void cxgb3i_pdu_cleanup(void);
void cxgb3i_conn_cleanup_task(struct iscsi_task *);
int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
int cxgb3i_conn_xmit_pdu(struct iscsi_task *);

void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);

#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
new file mode 100644
index 000000000000..1a41f04264f7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -0,0 +1,770 @@
/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"

#define DRV_MODULE_NAME		"cxgb3i_ddp"
#define DRV_MODULE_VERSION	"1.0.0"
#define DRV_MODULE_RELDATE	"Dec. 1, 2008"

static char version[] =
	"Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define ddp_log_error(fmt...)	printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)	printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)	printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif

/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into the ddp tag, which is
 * used as, or is the base for, the ITT/TTT.
 */

#define DDP_PGIDX_MAX		4
#define DDP_THRESHOLD		2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

static LIST_HEAD(cxgb3i_ddp_list);
static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	req->wr.wr_lo = 0;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgb3i_gather_list *gl)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];
		struct pagepod *ppod;
		int j, pidx;

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		ppod = (struct pagepod *)
		       (skb->head + sizeof(struct ulp_mem_io));
		memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod));
		for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
			ppod->addr[j] = pidx < gl->nelem ?
				cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
	return 0;
}

static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
			 unsigned int npods)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		ddp->gl_skb[idx] = NULL;
		memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
	return 0;
}

static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
					  int start, int max, int count,
					  struct cxgb3i_gather_list *gl)
{
	unsigned int i, j;

	spin_lock(&ddp->map_lock);
	for (i = start; i <= max;) {
		for (j = 0; j < count; j++) {
			if (ddp->gl_map[i + j])
				break;
		}
		if (j == count) {
			for (j = 0; j < count; j++)
				ddp->gl_map[i + j] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	return -EBUSY;
}

static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
	       count * sizeof(struct cxgb3i_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
				   int idx, int count)
{
	int i;

	for (i = 0; i < count; i++, idx++)
		if (ddp->gl_skb[idx]) {
			kfree_skb(ddp->gl_skb[idx]);
			ddp->gl_skb[idx] = NULL;
		}
}

static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
				   int count, gfp_t gfp)
{
	int i;

	for (i = 0; i < count; i++) {
		struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
						PPOD_SIZE, gfp);
		if (skb) {
			ddp->gl_skb[idx + i] = skb;
			skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
		} else {
			ddp_free_gl_skb(ddp, idx, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size.
 * @pgsz: page size
 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);

static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
			goto unmap;
	}

	return i;

unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -ENOMEM;
}

/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scattergather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
					      struct scatterlist *sgl,
					      unsigned int sgcnt,
					      struct pci_dev *pdev,
					      gfp_t gfp)
{
	struct cxgb3i_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
			      xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
		     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
		     gfp);
	if (!gl)
		return NULL;

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	sg = sg_next(sg);
	while (sg) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) ||
			    ((i != sgcnt - 1) &&
			     ((sglen + sgoffset) & ~PAGE_MASK)))
				goto error_out;

			j++;
			if (j == gl->nelem || sg->offset)
				goto error_out;
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
		i++;
		sg = sg_next(sg);
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);

/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);

/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: the s/w tag, if ddp setup is successful, it will be updated with
 *	  ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * ddp setup for a given page buffer list and construct the ddp tag.
 * return 0 if success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
			   struct cxgb3i_tag_format *tformat, u32 *tagp,
			   struct cxgb3i_gather_list *gl, gfp_t gfp)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1, idx_max;
	int err = -ENOMEM;
	u32 sw_tag = *tagp;
	u32 tag;

	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
	    gl->length < DDP_THRESHOLD) {
		ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
			      page_idx, gl->length, DDP_THRESHOLD);
		return -EINVAL;
	}

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	idx_max = ddp->nppods - npods + 1;

	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      idx_max, npods, gl);
		if (idx < 0 && ddp->idx_last >= npods)
			idx = ddp_find_unused_entries(ddp, 0,
						      ddp->idx_last - npods + 1,
						      npods, gl);
	}
	if (idx < 0) {
		ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
			      gl->length, gl->nelem, npods);
		return idx;
	}

	err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
	if (err < 0)
		goto unmark_entries;

	tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.maxoffset = htonl(gl->length);
	hdr.pgoffset = htonl(gl->offset);

	err = set_ddp_map(ddp, &hdr, idx, npods, gl);
	if (err < 0)
		goto free_gl_skb;

	ddp->idx_last = idx;
	ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
		      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
		      idx, npods);
	*tagp = tag;
	return 0;

free_gl_skb:
	ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);

/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	u32 idx;

	if (!ddp) {
		ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
		return;
	}

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl) {
			ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
				      tag, idx);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
			      tag, idx, npods);
		clear_ddp_map(ddp, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		cxgb3i_ddp_release_gl(gl, ddp->pdev);
	} else
		ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
			      tag, idx, ddp->nppods);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);

static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
			    int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}

/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);

/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);

/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
			     int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);

static int ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;
	static int vers_printed;

	if (!vers_printed) {
		printk(KERN_INFO "%s", version);
		vers_printed = 1;
	}

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return err;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
				   (sizeof(struct cxgb3i_gather_list *) +
				    sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return 0;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	tdev->ulp_iscsi = ddp;

	/* add to the list */
	write_lock(&cxgb3i_ddp_rwlock);
	list_add_tail(&ddp->list, &cxgb3i_ddp_list);
	write_unlock(&cxgb3i_ddp_rwlock);

	ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
		     "pkt %u,%u.\n",
		     ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
		     ddp->idx_mask, ddp->rsvd_tag_mask,
		     ddp->max_txsz, ddp->max_rxsz);
	return 0;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
	return err;
}

/**
 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pkt size, filled in by this func.
 * @rxsz: max rx pkt size, filled in by this func.
 * initialize the ddp pagepod manager for a given adapter if needed and
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
			    struct cxgb3i_tag_format *tformat,
			    unsigned int *txsz, unsigned int *rxsz)
{
	struct cxgb3i_ddp_info *ddp;
	unsigned char idx_bits;

	if (!tformat)
		return -EINVAL;

	if (!tdev->ulp_iscsi) {
		int err = ddp_init(tdev);
		if (err < 0)
			return err;
	}
	ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	idx_bits = 32 - tformat->sw_bits;
	tformat->rsvd_bits = ddp->idx_bits;
	tformat->rsvd_shift = PPOD_IDX_SHIFT;
	tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

	ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
		     tformat->sw_bits, tformat->rsvd_bits,
		     tformat->rsvd_shift, tformat->rsvd_mask);

	*txsz = ddp->max_txsz;
	*rxsz = ddp->max_rxsz;
	ddp_log_info("ddp max pkt size: %u, %u.\n",
		     ddp->max_txsz, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);

static void ddp_release(struct cxgb3i_ddp_info *ddp)
{
	int i = 0;
	struct t3cdev *tdev = ddp->tdev;

	tdev->ulp_iscsi = NULL;
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];
		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
				    >> PPOD_PAGES_SHIFT;

			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
		} else
			i++;
	}
	cxgb3i_free_big_mem(ddp);
}

/**
 * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resource held by the ddp pagepod manager for a given
 * adapter if needed
 */
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp;

	/* remove from the list */
	write_lock(&cxgb3i_ddp_rwlock);
	list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
		if (ddp->tdev == tdev) {
			list_del(&ddp->list);
			break;
		}
	}
	write_unlock(&cxgb3i_ddp_rwlock);

	if (ddp)
		ddp_release(ddp);
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);

/**
 * cxgb3i_ddp_init_module - module init entry point
 * initialize any driver wide global data structures
 */
static int __init cxgb3i_ddp_init_module(void)
{
	page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
	ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
		     PAGE_SIZE, page_idx);
	return 0;
}

/**
 * cxgb3i_ddp_exit_module - module cleanup/exit entry point
 * go through the ddp list and release any resource held.
 */
static void __exit cxgb3i_ddp_exit_module(void)
{
	struct cxgb3i_ddp_info *ddp;

	/* release all ddp manager if there is any */
	write_lock(&cxgb3i_ddp_rwlock);
	list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
		list_del(&ddp->list);
		ddp_release(ddp);
	}
	write_unlock(&cxgb3i_ddp_rwlock);
}

module_init(cxgb3i_ddp_init_module);
module_exit(cxgb3i_ddp_exit_module);
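
For a sense of the arithmetic in cxgb3i_ddp_tag_reserve() above: each pagepod covers up to four pages (PPOD_PAGES_MAX, with PPOD_PAGES_SHIFT = 2), and the pod index lands at bit 6 of the tag (PPOD_IDX_SHIFT). A standalone userspace illustration with made-up inputs, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int nelem = 9;				/* pages in the gather list */
	unsigned int npods = (nelem + 4 - 1) >> 2;	/* round up to pods -> 3 */
	unsigned int idx = 5;				/* free pod slot found */
	unsigned int tag = 0 | (idx << 6);		/* sw tag base of 0 assumed */

	printf("npods=%u tag=0x%x\n", npods, tag);	/* npods=3 tag=0x140 */
	return 0;
}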
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
new file mode 100644
index 000000000000..5c7c4d95c493
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -0,0 +1,306 @@
/*
 * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#ifndef __CXGB3I_ULP2_DDP_H__
#define __CXGB3I_ULP2_DDP_H__

/**
 * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
 *
 * @sw_bits: # of bits used by iscsi software layer
 * @rsvd_bits: # of bits used by h/w
 * @rsvd_shift: h/w bits shift left
 * @rsvd_mask: reserved bit mask
 */
struct cxgb3i_tag_format {
	unsigned char sw_bits;
	unsigned char rsvd_bits;
	unsigned char rsvd_shift;
	unsigned char filler[1];
	u32 rsvd_mask;
};

/**
 * struct cxgb3i_gather_list - cxgb3i direct data placement memory
 *
 * @tag: ddp tag
 * @length: total data buffer length
 * @offset: initial offset to the 1st page
 * @nelem: # of pages
 * @pages: page pointers
 * @phys_addr: physical address
 */
struct cxgb3i_gather_list {
	u32 tag;
	unsigned int length;
	unsigned int offset;
	unsigned int nelem;
	struct page **pages;
	dma_addr_t phys_addr[0];
};

/**
 * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
 *
 * @list: list head to link elements
 * @tdev: pointer to t3cdev used by cxgb3 driver
 * @max_txsz: max tx packet size for ddp
 * @max_rxsz: max rx packet size for ddp
 * @llimit: lower bound of the page pod memory
 * @ulimit: upper bound of the page pod memory
 * @nppods: # of page pod entries
 * @idx_last: page pod entry last used
 * @idx_bits: # of bits the pagepod index would take
 * @idx_mask: pagepod index mask
 * @rsvd_tag_mask: tag mask
 * @map_lock: lock to synchronize access to the page pod map
 * @gl_map: ddp memory gather list
 * @gl_skb: skb used to program the pagepod
 */
struct cxgb3i_ddp_info {
	struct list_head list;
	struct t3cdev *tdev;
	struct pci_dev *pdev;
	unsigned int max_txsz;
	unsigned int max_rxsz;
	unsigned int llimit;
	unsigned int ulimit;
	unsigned int nppods;
	unsigned int idx_last;
	unsigned char idx_bits;
	unsigned char filler[3];
	u32 idx_mask;
	u32 rsvd_tag_mask;
	spinlock_t map_lock;
	struct cxgb3i_gather_list **gl_map;
	struct sk_buff **gl_skb;
};

#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
#define PPOD_PAGES_MAX		4
#define PPOD_PAGES_SHIFT	2	/* 4 pages per pod */

/*
 * struct pagepod_hdr, pagepod - pagepod format
 */
struct pagepod_hdr {
	u32 vld_tid;
	u32 pgsz_tag_clr;
	u32 maxoffset;
	u32 pgoffset;
	u64 rsvd;
};

struct pagepod {
	struct pagepod_hdr hdr;
	u64 addr[PPOD_PAGES_MAX + 1];
};

#define PPOD_SIZE		sizeof(struct pagepod)	/* 64 */
#define PPOD_SIZE_SHIFT		6

#define PPOD_COLOR_SHIFT	0
#define PPOD_COLOR_SIZE		6
#define PPOD_COLOR_MASK		((1 << PPOD_COLOR_SIZE) - 1)

#define PPOD_IDX_SHIFT		PPOD_COLOR_SIZE
#define PPOD_IDX_MAX_SIZE	24

#define S_PPOD_TID	0
#define M_PPOD_TID	0xFFFFFF
#define V_PPOD_TID(x)	((x) << S_PPOD_TID)

#define S_PPOD_VALID	24
#define V_PPOD_VALID(x)	((x) << S_PPOD_VALID)
#define F_PPOD_VALID	V_PPOD_VALID(1U)

#define S_PPOD_COLOR	0
#define M_PPOD_COLOR	0x3F
#define V_PPOD_COLOR(x)	((x) << S_PPOD_COLOR)

#define S_PPOD_TAG	6
#define M_PPOD_TAG	0xFFFFFF
#define V_PPOD_TAG(x)	((x) << S_PPOD_TAG)

#define S_PPOD_PGSZ	30
#define M_PPOD_PGSZ	0x3
#define V_PPOD_PGSZ(x)	((x) << S_PPOD_PGSZ)

/*
 * large memory chunk allocation/release
 * use vmalloc() if kmalloc() fails
 */
static inline void *cxgb3i_alloc_big_mem(unsigned int size,
					 gfp_t gfp)
{
	void *p = kmalloc(size, gfp);
	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

static inline void cxgb3i_free_big_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/*
 * cxgb3i ddp tags are 32 bits; they consist of reserved bits used by the h/w
 * and non-reserved bits that can be used by the iscsi s/w.
 * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
 * in struct cxgb3i_tag_format.
 *
 * The uppermost reserved bit can be used to check if a tag is a ddp tag:
 * if the bit is 0, the tag is a valid ddp tag
 */

/**
 * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return true if the tag is a ddp tag, false otherwise.
 */
static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
{
	return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
}

/**
 * cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for
 *			  the reserved/hw bits
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * return true if the s/w tag leaves room for the reserved bits,
 * false otherwise.
 */
static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
				       u32 sw_tag)
{
	sw_tag >>= (32 - tformat->rsvd_bits);
	return !sw_tag;
}

/**
 * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 *
 * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag.
 */
static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
					 u32 sw_tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 mask = (1 << shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & ((1 << shift) - 1);
		u32 v2 = (sw_tag >> (shift - 1)) << shift;

		return v2 | v1 | 1 << shift;
	}
	return sw_tag | 1 << shift;
}

/**
 * cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not
 *			 used.
 * @tformat: tag format information
 * @sw_tag: s/w tag to be checked
 */
static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
				      u32 sw_tag)
{
	u32 mask = (1 << tformat->rsvd_shift) - 1;

	if (sw_tag && (sw_tag & ~mask)) {
		u32 v1 = sw_tag & mask;
		u32 v2 = sw_tag >> tformat->rsvd_shift;

		v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
		return v2 | v1;
	}
	return sw_tag;
}

/**
 * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return the reserved bits in the tag
 */
static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
				       u32 tag)
{
	if (cxgb3i_is_ddp_tag(tformat, tag))
		return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
	return 0;
}

/**
 * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
 * @tformat: tag format information
 * @tag: tag to be checked
 *
 * return the non-reserved bits in the tag.
 */
static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
					  u32 tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgb3i_is_ddp_tag(tformat, tag)) {
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		u32 mask = (1 << shift) - 1;

		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}

int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
			   struct cxgb3i_tag_format *, u32 *tag,
			   struct cxgb3i_gather_list *, gfp_t gfp);
void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);

struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
					      struct scatterlist *sgl,
					      unsigned int sgcnt,
					      struct pci_dev *pdev,
					      gfp_t gfp);
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev);

int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
				    int reply);
int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
			       unsigned long pgsz);
int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
			     int hcrc, int dcrc, int reply);
int cxgb3i_ddp_find_page_index(unsigned long pgsz);
int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
			    unsigned int *txsz, unsigned int *rxsz);
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
new file mode 100644
index 000000000000..091ecb4d9f3d
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -0,0 +1,107 @@
1/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Karen Xie (kxie@chelsio.com)
10 */
11
12#include "cxgb3i.h"
13
14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.0"
16#define DRV_MODULE_RELDATE "Jun. 1, 2008"
17
18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
20 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
21
22MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
23MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
24MODULE_LICENSE("GPL");
25MODULE_VERSION(DRV_MODULE_VERSION);
26
27static void open_s3_dev(struct t3cdev *);
28static void close_s3_dev(struct t3cdev *);
29
30static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
31static struct cxgb3_client t3c_client = {
32 .name = "iscsi_cxgb3",
33 .handlers = cxgb3i_cpl_handlers,
34 .add = open_s3_dev,
35 .remove = close_s3_dev,
36};
37
38/**
39 * open_s3_dev - register with cxgb3 LLD
40 * @t3dev: cxgb3 adapter instance
41 */
42static void open_s3_dev(struct t3cdev *t3dev)
43{
44 static int vers_printed;
45
46 if (!vers_printed) {
47 printk(KERN_INFO "%s", version);
48 vers_printed = 1;
49 }
50
51 cxgb3i_sdev_add(t3dev, &t3c_client);
52 cxgb3i_adapter_add(t3dev);
53}
54
55/**
56 * close_s3_dev - de-register with cxgb3 LLD
57 * @t3dev: cxgb3 adapter instance
58 */
59static void close_s3_dev(struct t3cdev *t3dev)
60{
61 cxgb3i_adapter_remove(t3dev);
62 cxgb3i_sdev_remove(t3dev);
63}
64
65/**
66 * cxgb3i_init_module - module init entry point
67 *
68 * initialize driver-wide global data structures and register with the
69 * cxgb3 module
70 */
71static int __init cxgb3i_init_module(void)
72{
73	int err;
74
75	err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
76	if (err < 0)
77		return err;
78
79	err = cxgb3i_iscsi_init();
80	if (err < 0)
81		goto sdev_cleanup;
82
83	err = cxgb3i_pdu_init();
84	if (err < 0)
85		goto iscsi_cleanup;
86
87	cxgb3_register_client(&t3c_client);
88	return 0;
89
90iscsi_cleanup:
91	cxgb3i_iscsi_cleanup();
92sdev_cleanup:
93	cxgb3i_sdev_cleanup();
94	return err;
95}
91
92/**
93 * cxgb3i_exit_module - module cleanup/exit entry point
94 *
95 * go through the driver hba list and, for each hba, release any resources
96 * held, then unregister the iscsi transport and de-register from cxgb3
97 */
98static void __exit cxgb3i_exit_module(void)
99{
100 cxgb3_unregister_client(&t3c_client);
101 cxgb3i_pdu_cleanup();
102 cxgb3i_iscsi_cleanup();
103 cxgb3i_sdev_cleanup();
104}
105
106module_init(cxgb3i_init_module);
107module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
new file mode 100644
index 000000000000..d83464b9b3f9
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -0,0 +1,951 @@
1/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
2 *
3 * Copyright (c) 2008 Chelsio Communications, Inc.
4 * Copyright (c) 2008 Mike Christie
5 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Karen Xie (kxie@chelsio.com)
12 */
13
14#include <linux/inet.h>
15#include <linux/crypto.h>
16#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h>
19#include <scsi/scsi_eh.h>
20#include <scsi/scsi_host.h>
21#include <scsi/scsi.h>
22#include <scsi/iscsi_proto.h>
23#include <scsi/libiscsi.h>
24#include <scsi/scsi_transport_iscsi.h>
25
26#include "cxgb3i.h"
27#include "cxgb3i_pdu.h"
28
29#ifdef __DEBUG_CXGB3I_TAG__
30#define cxgb3i_tag_debug cxgb3i_log_debug
31#else
32#define cxgb3i_tag_debug(fmt...)
33#endif
34
35#ifdef __DEBUG_CXGB3I_API__
36#define cxgb3i_api_debug cxgb3i_log_debug
37#else
38#define cxgb3i_api_debug(fmt...)
39#endif
40
41/*
42 * align pdu size to multiple of 512 for better performance
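 * (e.g. 8192 stays 8192, while 1000 is rounded down to 512)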
43 */
44#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
45
46static struct scsi_transport_template *cxgb3i_scsi_transport;
47static struct scsi_host_template cxgb3i_host_template;
48static struct iscsi_transport cxgb3i_iscsi_transport;
49static unsigned char sw_tag_idx_bits;
50static unsigned char sw_tag_age_bits;
51
52static LIST_HEAD(cxgb3i_snic_list);
53static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
54
55/**
56 * cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
57 * @t3dev: t3cdev adapter
58 * return the resulting cxgb3i_adapter struct
59 */
60struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
61{
62 struct cxgb3i_adapter *snic;
63 struct adapter *adapter = tdev2adap(t3dev);
64 int i;
65
66 snic = kzalloc(sizeof(*snic), GFP_KERNEL);
67 if (!snic) {
68 cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
69 return NULL;
70 }
71 spin_lock_init(&snic->lock);
72
73 snic->tdev = t3dev;
74 snic->pdev = adapter->pdev;
75 snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
76
77 if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
78 &snic->tx_max_size,
79 &snic->rx_max_size) < 0)
80 goto free_snic;
81
82 for_each_port(adapter, i) {
83 snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
84 if (!snic->hba[i])
85 goto ulp_cleanup;
86 }
87 snic->hba_cnt = adapter->params.nports;
88
89 /* add to the list */
90 write_lock(&cxgb3i_snic_rwlock);
91 list_add_tail(&snic->list_head, &cxgb3i_snic_list);
92 write_unlock(&cxgb3i_snic_rwlock);
93
94 return snic;
95
96ulp_cleanup:
97 cxgb3i_adapter_ddp_cleanup(t3dev);
98free_snic:
99 kfree(snic);
100 return NULL;
101}
102
103/**
104 * cxgb3i_adapter_remove - release all the resources held and clean up any
105 * h/w settings
106 * @t3dev: t3cdev adapter
107 */
108void cxgb3i_adapter_remove(struct t3cdev *t3dev)
109{
110 int i;
111	struct cxgb3i_adapter *snic = NULL, *tmp;
112
113	/* find the matching snic and remove it from the list */
114	write_lock(&cxgb3i_snic_rwlock);
115	list_for_each_entry(tmp, &cxgb3i_snic_list, list_head)
116		if (tmp->tdev == t3dev) {
117			snic = tmp;
118			list_del(&snic->list_head);
119			break;
120		}
121 write_unlock(&cxgb3i_snic_rwlock);
122
123 if (snic) {
124 for (i = 0; i < snic->hba_cnt; i++) {
125 if (snic->hba[i]) {
126 cxgb3i_hba_host_remove(snic->hba[i]);
127 snic->hba[i] = NULL;
128 }
129 }
130
131 /* release ddp resources */
132 cxgb3i_adapter_ddp_cleanup(snic->tdev);
133 kfree(snic);
134 }
135}
136
137/**
138 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure with a given
139 * net_device
140 * @ndev: the net_device
141 */
142struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
143{
144 struct cxgb3i_adapter *snic;
145 int i;
146
147 read_lock(&cxgb3i_snic_rwlock);
148 list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
149 for (i = 0; i < snic->hba_cnt; i++) {
150 if (snic->hba[i]->ndev == ndev) {
151 read_unlock(&cxgb3i_snic_rwlock);
152 return snic->hba[i];
153 }
154 }
155 }
156 read_unlock(&cxgb3i_snic_rwlock);
157 return NULL;
158}
159
160/**
161 * cxgb3i_hba_host_add - register a new host with scsi/iscsi
162 * @snic: the cxgb3i adapter
163 * @ndev: associated net_device
164 */
165struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
166 struct net_device *ndev)
167{
168 struct cxgb3i_hba *hba;
169 struct Scsi_Host *shost;
170 int err;
171
172 shost = iscsi_host_alloc(&cxgb3i_host_template,
173 sizeof(struct cxgb3i_hba),
174 CXGB3I_SCSI_QDEPTH_DFLT);
175 if (!shost) {
176 cxgb3i_log_info("iscsi_host_alloc failed.\n");
177 return NULL;
178 }
179
180 shost->transportt = cxgb3i_scsi_transport;
181 shost->max_lun = CXGB3I_MAX_LUN;
182 shost->max_id = CXGB3I_MAX_TARGET;
183 shost->max_channel = 0;
184 shost->max_cmd_len = 16;
185
186 hba = iscsi_host_priv(shost);
187 hba->snic = snic;
188 hba->ndev = ndev;
189 hba->shost = shost;
190
191 pci_dev_get(snic->pdev);
192 err = iscsi_host_add(shost, &snic->pdev->dev);
193 if (err) {
194 cxgb3i_log_info("iscsi_host_add failed.\n");
195 goto pci_dev_put;
196 }
197
198 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
199 shost, hba, shost->host_no);
200
201 return hba;
202
203pci_dev_put:
204 pci_dev_put(snic->pdev);
205 scsi_host_put(shost);
206 return NULL;
207}
208
209/**
210 * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
211 * @hba: the cxgb3i hba
212 */
213void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
214{
215 cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
216 hba->shost, hba, hba->shost->host_no);
217 iscsi_host_remove(hba->shost);
218 pci_dev_put(hba->snic->pdev);
219 iscsi_host_free(hba->shost);
220}
221
222/**
223 * cxgb3i_ep_connect - establish TCP connection to target portal
224 * @dst_addr: target IP address
225 * @non_blocking: blocking or non-blocking call
226 *
227 * Initiates a TCP/IP connection to the dst_addr
228 */
229static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
230 int non_blocking)
231{
232 struct iscsi_endpoint *ep;
233 struct cxgb3i_endpoint *cep;
234 struct cxgb3i_hba *hba;
235 struct s3_conn *c3cn = NULL;
236 int err = 0;
237
238 c3cn = cxgb3i_c3cn_create();
239 if (!c3cn) {
240 cxgb3i_log_info("ep connect OOM.\n");
241 err = -ENOMEM;
242 goto release_conn;
243 }
244
245 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
246 if (err < 0) {
247 cxgb3i_log_info("ep connect failed.\n");
248 goto release_conn;
249 }
250 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
251 if (!hba) {
252 err = -ENOSPC;
253		cxgb3i_log_info("connection not going through a cxgb3i device.\n");
254 goto release_conn;
255 }
256 if (c3cn_is_closing(c3cn)) {
257 err = -ENOSPC;
258 cxgb3i_log_info("ep connect unable to connect.\n");
259 goto release_conn;
260 }
261
262 ep = iscsi_create_endpoint(sizeof(*cep));
263 if (!ep) {
264 err = -ENOMEM;
265 cxgb3i_log_info("iscsi alloc ep, OOM.\n");
266 goto release_conn;
267 }
268 cep = ep->dd_data;
269 cep->c3cn = c3cn;
270 cep->hba = hba;
271
272 cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
273 ep, cep, c3cn, hba);
274 return ep;
275
276release_conn:
277 cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
278 if (c3cn)
279 cxgb3i_c3cn_release(c3cn);
280 return ERR_PTR(err);
281}
282
283/**
284 * cxgb3i_ep_poll - polls for TCP connection establishment
285 * @ep: TCP connection (endpoint) handle
286 * @timeout_ms: timeout value in milliseconds
287 *
288 * polls for TCP connect request to complete
289 */
290static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
291{
292 struct cxgb3i_endpoint *cep = ep->dd_data;
293 struct s3_conn *c3cn = cep->c3cn;
294
295 if (!c3cn_is_established(c3cn))
296 return 0;
297 cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
298 return 1;
299}
300
301/**
302 * cxgb3i_ep_disconnect - teardown TCP connection
303 * @ep: TCP connection (endpoint) handle
304 *
305 * teardown TCP connection
306 */
307static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
308{
309 struct cxgb3i_endpoint *cep = ep->dd_data;
310 struct cxgb3i_conn *cconn = cep->cconn;
311
312 cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
313
314 if (cconn && cconn->conn) {
315 /*
316		 * stop the xmit path so the xmit_pdu function is
317		 * no longer called
318 */
319 iscsi_suspend_tx(cconn->conn);
320
321 write_lock_bh(&cep->c3cn->callback_lock);
322 cep->c3cn->user_data = NULL;
323 cconn->cep = NULL;
324 write_unlock_bh(&cep->c3cn->callback_lock);
325 }
326
327 cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
328 ep, cep, cep->c3cn);
329 cxgb3i_c3cn_release(cep->c3cn);
330 iscsi_destroy_endpoint(ep);
331}
332
333/**
334 * cxgb3i_session_create - create a new iscsi session
335 * @ep: iscsi endpoint for this session
336 * @cmds_max: max # of commands
336 * @qdepth: scsi queue depth
337 * @initial_cmdsn: initial iscsi CMDSN for this session
338 * @host_no: pointer to return host no
339 *
340 * Creates a new iSCSI session
341 */
342static struct iscsi_cls_session *
343cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
344 u32 initial_cmdsn, u32 *host_no)
345{
346 struct cxgb3i_endpoint *cep;
347 struct cxgb3i_hba *hba;
348 struct Scsi_Host *shost;
349 struct iscsi_cls_session *cls_session;
350 struct iscsi_session *session;
351
352 if (!ep) {
353 cxgb3i_log_error("%s, missing endpoint.\n", __func__);
354 return NULL;
355 }
356
357 cep = ep->dd_data;
358 hba = cep->hba;
359 shost = hba->shost;
360 cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
361 BUG_ON(hba != iscsi_host_priv(shost));
362
363 *host_no = shost->host_no;
364
365 cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
366 cmds_max,
367 sizeof(struct iscsi_tcp_task),
368 initial_cmdsn, ISCSI_MAX_TARGET);
369 if (!cls_session)
370 return NULL;
371 session = cls_session->dd_data;
372 if (iscsi_tcp_r2tpool_alloc(session))
373 goto remove_session;
374
375 return cls_session;
376
377remove_session:
378 iscsi_session_teardown(cls_session);
379 return NULL;
380}
381
382/**
383 * cxgb3i_session_destroy - destroys iscsi session
384 * @cls_session: pointer to iscsi cls session
385 *
386 * Destroys an iSCSI session instance and releases all the resources held
387 */
388static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
389{
390 cxgb3i_api_debug("sess 0x%p.\n", cls_session);
391 iscsi_tcp_r2tpool_free(cls_session->dd_data);
392 iscsi_session_teardown(cls_session);
393}
394
395/**
396 * cxgb3i_conn_max_xmit_dlength -- check the max. xmit pdu segment size,
397 * reduce it to be within the hardware limit if needed
398 * @conn: iscsi connection
399 */
400static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
401{
403 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
404 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
405 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
406 cconn->hba->snic->tx_max_size -
407 ISCSI_PDU_NONPAYLOAD_MAX);
408
409 if (conn->max_xmit_dlength)
410 conn->max_xmit_dlength = min_t(unsigned int,
411 conn->max_xmit_dlength, max);
412 else
413 conn->max_xmit_dlength = max;
414 align_pdu_size(conn->max_xmit_dlength);
415 cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
416 conn, conn->max_xmit_dlength);
417 return 0;
418}
419
420/**
421 * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size against
422 * the hardware limit
423 * @conn: iscsi connection
424 * return 0 if the value is valid, < 0 otherwise.
425 */
426static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
427{
428 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
429 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
430 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
431 cconn->hba->snic->rx_max_size -
432 ISCSI_PDU_NONPAYLOAD_MAX);
433
434 align_pdu_size(max);
435 if (conn->max_recv_dlength) {
436 if (conn->max_recv_dlength > max) {
437 cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
438 " Need to be <= %u.\n",
439 conn->max_recv_dlength, max);
440 return -EINVAL;
441 }
442 conn->max_recv_dlength = min_t(unsigned int,
443 conn->max_recv_dlength, max);
444 align_pdu_size(conn->max_recv_dlength);
445 } else
446 conn->max_recv_dlength = max;
447 cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
448 conn, conn->max_recv_dlength);
449 return 0;
450}
451
452/**
453 * cxgb3i_conn_create - create iscsi connection instance
454 * @cls_session: pointer to iscsi cls session
455 * @cid: iscsi cid
456 *
457 * Creates a new iSCSI connection instance for a given session
458 */
459static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
460 *cls_session, u32 cid)
461{
462 struct iscsi_cls_conn *cls_conn;
463 struct iscsi_conn *conn;
464 struct iscsi_tcp_conn *tcp_conn;
465 struct cxgb3i_conn *cconn;
466
467 cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
468
469 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
470 if (!cls_conn)
471 return NULL;
472 conn = cls_conn->dd_data;
473 tcp_conn = conn->dd_data;
474 cconn = tcp_conn->dd_data;
475
476 cconn->conn = conn;
477 return cls_conn;
478}
479
480/**
481 * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
482 * @cls_session: pointer to iscsi cls session
483 * @cls_conn: pointer to iscsi cls conn
484 * @transport_eph: 64-bit EP handle
485 * @is_leading: leading connection on this session?
486 *
487 * Binds together an iSCSI session, an iSCSI connection and a
488 * TCP connection. This routine returns error code if the TCP
489 * connection does not belong on the device iSCSI sess/conn is bound
490 */
492static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
493 struct iscsi_cls_conn *cls_conn,
494 u64 transport_eph, int is_leading)
495{
496 struct iscsi_conn *conn = cls_conn->dd_data;
497 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
498 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
499 struct cxgb3i_adapter *snic;
500 struct iscsi_endpoint *ep;
501 struct cxgb3i_endpoint *cep;
502 struct s3_conn *c3cn;
503 int err;
504
505 ep = iscsi_lookup_endpoint(transport_eph);
506 if (!ep)
507 return -EINVAL;
508
509 /* setup ddp pagesize */
510 cep = ep->dd_data;
511 c3cn = cep->c3cn;
512 snic = cep->hba->snic;
513 err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
514 if (err < 0)
515 return err;
516
517 cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
518 ep, cls_session, cls_conn);
519
520 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
521 if (err)
522 return -EINVAL;
523
524 /* calculate the tag idx bits needed for this conn based on cmds_max */
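	/* e.g. cmds_max = 128 gives __ilog2_u32(127) + 1 = 7 tag idx bits */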
525 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
526 cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
527 conn->session->cmds_max, cconn->task_idx_bits);
528
529 read_lock(&c3cn->callback_lock);
530 c3cn->user_data = conn;
531 cconn->hba = cep->hba;
532 cconn->cep = cep;
533 cep->cconn = cconn;
534 read_unlock(&c3cn->callback_lock);
535
536 cxgb3i_conn_max_xmit_dlength(conn);
537 cxgb3i_conn_max_recv_dlength(conn);
538
539 spin_lock_bh(&conn->session->lock);
540 sprintf(conn->portal_address, NIPQUAD_FMT,
541 NIPQUAD(c3cn->daddr.sin_addr.s_addr));
542 conn->portal_port = ntohs(c3cn->daddr.sin_port);
543 spin_unlock_bh(&conn->session->lock);
544
545 /* init recv engine */
546 iscsi_tcp_hdr_recv_prep(tcp_conn);
547
548 return 0;
549}
550
551/**
552 * cxgb3i_conn_get_param - return iscsi connection parameter to caller
553 * @cls_conn: pointer to iscsi cls conn
554 * @param: parameter type identifier
555 * @buf: buffer pointer
556 *
557 * returns iSCSI connection parameters
558 */
559static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
560 enum iscsi_param param, char *buf)
561{
562 struct iscsi_conn *conn = cls_conn->dd_data;
563 int len;
564
565 cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
566
567 switch (param) {
568 case ISCSI_PARAM_CONN_PORT:
569 spin_lock_bh(&conn->session->lock);
570 len = sprintf(buf, "%hu\n", conn->portal_port);
571 spin_unlock_bh(&conn->session->lock);
572 break;
573 case ISCSI_PARAM_CONN_ADDRESS:
574 spin_lock_bh(&conn->session->lock);
575 len = sprintf(buf, "%s\n", conn->portal_address);
576 spin_unlock_bh(&conn->session->lock);
577 break;
578 default:
579 return iscsi_conn_get_param(cls_conn, param, buf);
580 }
581
582 return len;
583}
584
585/**
586 * cxgb3i_conn_set_param - set iscsi connection parameter
587 * @cls_conn: pointer to iscsi cls conn
588 * @param: parameter type identifier
589 * @buf: buffer pointer
590 * @buflen: buffer length
591 *
592 * set iSCSI connection parameters
593 */
594static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
595 enum iscsi_param param, char *buf, int buflen)
596{
597 struct iscsi_conn *conn = cls_conn->dd_data;
598 struct iscsi_session *session = conn->session;
599 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
600 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
601 struct cxgb3i_adapter *snic = cconn->hba->snic;
602 struct s3_conn *c3cn = cconn->cep->c3cn;
603 int value, err = 0;
604
605 switch (param) {
606 case ISCSI_PARAM_HDRDGST_EN:
607 err = iscsi_set_param(cls_conn, param, buf, buflen);
608 if (!err && conn->hdrdgst_en)
609 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
610 conn->hdrdgst_en,
611 conn->datadgst_en, 0);
612 break;
613 case ISCSI_PARAM_DATADGST_EN:
614 err = iscsi_set_param(cls_conn, param, buf, buflen);
615 if (!err && conn->datadgst_en)
616 err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
617 conn->hdrdgst_en,
618 conn->datadgst_en, 0);
619 break;
620 case ISCSI_PARAM_MAX_R2T:
621 sscanf(buf, "%d", &value);
622 if (value <= 0 || !is_power_of_2(value))
623 return -EINVAL;
624 if (session->max_r2t == value)
625 break;
626 iscsi_tcp_r2tpool_free(session);
627 err = iscsi_set_param(cls_conn, param, buf, buflen);
628		if (!err && iscsi_tcp_r2tpool_alloc(session))
629			return -ENOMEM;
630		break;
631	case ISCSI_PARAM_MAX_RECV_DLENGTH:
631 err = iscsi_set_param(cls_conn, param, buf, buflen);
632 if (!err)
633 err = cxgb3i_conn_max_recv_dlength(conn);
634 break;
635 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
636 err = iscsi_set_param(cls_conn, param, buf, buflen);
637 if (!err)
638 err = cxgb3i_conn_max_xmit_dlength(conn);
639 break;
640 default:
641 return iscsi_set_param(cls_conn, param, buf, buflen);
642 }
643 return err;
644}
645
646/**
647 * cxgb3i_host_set_param - configure host (adapter) related parameters
648 * @shost: scsi host pointer
649 * @param: parameter type identifier
650 * @buf: buffer pointer
651 * @buflen: buffer length
651 */
652static int cxgb3i_host_set_param(struct Scsi_Host *shost,
653 enum iscsi_host_param param,
654 char *buf, int buflen)
655{
656 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
657
658 cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
659
660 switch (param) {
661 case ISCSI_HOST_PARAM_IPADDRESS:
662 {
663 __be32 addr = in_aton(buf);
664 cxgb3i_set_private_ipv4addr(hba->ndev, addr);
665 return 0;
666 }
667 case ISCSI_HOST_PARAM_HWADDRESS:
668 case ISCSI_HOST_PARAM_NETDEV_NAME:
669 /* ignore */
670 return 0;
671 default:
672 return iscsi_host_set_param(shost, param, buf, buflen);
673 }
674}
675
676/**
677 * cxgb3i_host_get_param - returns host (adapter) related parameters
678 * @shost: scsi host pointer
679 * @param: parameter type identifier
680 * @buf: buffer pointer
681 */
682static int cxgb3i_host_get_param(struct Scsi_Host *shost,
683 enum iscsi_host_param param, char *buf)
684{
685 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
686 int len = 0;
687
688 cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
689
690 switch (param) {
691 case ISCSI_HOST_PARAM_HWADDRESS:
692 len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
693 break;
694 case ISCSI_HOST_PARAM_NETDEV_NAME:
695 len = sprintf(buf, "%s\n", hba->ndev->name);
696 break;
697 case ISCSI_HOST_PARAM_IPADDRESS:
698 {
699 __be32 addr;
700
701 addr = cxgb3i_get_private_ipv4addr(hba->ndev);
702 len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
703 break;
704 }
705 default:
706 return iscsi_host_get_param(shost, param, buf);
707 }
708 return len;
709}
710
711/**
712 * cxgb3i_conn_get_stats - returns iSCSI stats
713 * @cls_conn: pointer to iscsi cls conn
714 * @stats: pointer to iscsi statistic struct
715 */
716static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
717 struct iscsi_stats *stats)
718{
719 struct iscsi_conn *conn = cls_conn->dd_data;
720
721 stats->txdata_octets = conn->txdata_octets;
722 stats->rxdata_octets = conn->rxdata_octets;
723 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
724 stats->dataout_pdus = conn->dataout_pdus_cnt;
725 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
726 stats->datain_pdus = conn->datain_pdus_cnt;
727 stats->r2t_pdus = conn->r2t_pdus_cnt;
728 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
729 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
730 stats->digest_err = 0;
731 stats->timeout_err = 0;
732 stats->custom_length = 1;
733 strcpy(stats->custom[0].desc, "eh_abort_cnt");
734 stats->custom[0].value = conn->eh_abort_cnt;
735}
736
737/**
738 * cxgb3i_parse_itt - get the idx and age bits from a given tag
739 * @conn: iscsi connection
740 * @itt: itt tag
741 * @idx: task index, filled in by this function
742 * @age: session age, filled in by this function
743 */
744static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
745 int *idx, int *age)
746{
747 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
748 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
749 struct cxgb3i_adapter *snic = cconn->hba->snic;
750 u32 tag = ntohl((__force u32) itt);
751 u32 sw_bits;
752
753 sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
754 if (idx)
755 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
756 if (age)
757 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
758
759 cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
760 tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
761 age ? *age : 0xFF);
762}
763
764/**
765 * cxgb3i_reserve_itt - generate a tag for a given task
766 * Try to set up ddp for a scsi read task.
767 * @task: iscsi task
768 * @hdr_itt: tag, filled in by this function
769 */
770int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
771{
772 struct scsi_cmnd *sc = task->sc;
773 struct iscsi_conn *conn = task->conn;
774 struct iscsi_session *sess = conn->session;
775 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
776 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
777 struct cxgb3i_adapter *snic = cconn->hba->snic;
778 struct cxgb3i_tag_format *tformat = &snic->tag_format;
779 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
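	/* e.g. with task_idx_bits = 7, age = 0x3 and itt = 0x15,
	 * sw_tag = (0x3 << 7) | 0x15 = 0x195
	 */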
780 u32 tag;
781 int err = -EINVAL;
782
783 if (sc &&
784 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
785 cxgb3i_sw_tag_usable(tformat, sw_tag)) {
786 struct s3_conn *c3cn = cconn->cep->c3cn;
787 struct cxgb3i_gather_list *gl;
788
789 gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
790 scsi_in(sc)->table.sgl,
791 scsi_in(sc)->table.nents,
792 snic->pdev,
793 GFP_ATOMIC);
794 if (gl) {
795 tag = sw_tag;
796 err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
797 tformat, &tag,
798 gl, GFP_ATOMIC);
799 if (err < 0)
800 cxgb3i_ddp_release_gl(gl, snic->pdev);
801 }
802 }
803
804 if (err < 0)
805 tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
806	/* the itt needs to be sent in big-endian order */
807 *hdr_itt = (__force itt_t)htonl(tag);
808
809 cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
810 tag, *hdr_itt, task->itt, sess->age);
811 return 0;
812}
813
814/**
815 * cxgb3i_release_itt - release the tag for a given task
816 * if the tag is a ddp tag, release the ddp setup
817 * @task: iscsi task
818 * @hdr_itt: tag
819 */
820void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
821{
822 struct scsi_cmnd *sc = task->sc;
823 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
824 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
825 struct cxgb3i_adapter *snic = cconn->hba->snic;
826 struct cxgb3i_tag_format *tformat = &snic->tag_format;
827 u32 tag = ntohl((__force u32)hdr_itt);
828
829 cxgb3i_tag_debug("release tag 0x%x.\n", tag);
830
831 if (sc &&
832 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
833 cxgb3i_is_ddp_tag(tformat, tag))
834 cxgb3i_ddp_tag_release(snic->tdev, tag);
835}
836
837/**
838 * cxgb3i_host_template -- scsi_host_template structure
839 * used when registering with the scsi mid layer
840 */
841static struct scsi_host_template cxgb3i_host_template = {
842 .module = THIS_MODULE,
843 .name = "Chelsio S3xx iSCSI Initiator",
844 .proc_name = "cxgb3i",
845 .queuecommand = iscsi_queuecommand,
846 .change_queue_depth = iscsi_change_queue_depth,
847 .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
848 .sg_tablesize = SG_ALL,
849 .max_sectors = 0xFFFF,
850 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
851 .eh_abort_handler = iscsi_eh_abort,
852 .eh_device_reset_handler = iscsi_eh_device_reset,
853 .eh_target_reset_handler = iscsi_eh_target_reset,
854 .use_clustering = DISABLE_CLUSTERING,
855 .this_id = -1,
856};
857
858static struct iscsi_transport cxgb3i_iscsi_transport = {
859 .owner = THIS_MODULE,
860 .name = "cxgb3i",
861 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
862 | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
863 CAP_PADDING_OFFLOAD,
864 .param_mask = ISCSI_MAX_RECV_DLENGTH |
865 ISCSI_MAX_XMIT_DLENGTH |
866 ISCSI_HDRDGST_EN |
867 ISCSI_DATADGST_EN |
868 ISCSI_INITIAL_R2T_EN |
869 ISCSI_MAX_R2T |
870 ISCSI_IMM_DATA_EN |
871 ISCSI_FIRST_BURST |
872 ISCSI_MAX_BURST |
873 ISCSI_PDU_INORDER_EN |
874 ISCSI_DATASEQ_INORDER_EN |
875 ISCSI_ERL |
876 ISCSI_CONN_PORT |
877 ISCSI_CONN_ADDRESS |
878 ISCSI_EXP_STATSN |
879 ISCSI_PERSISTENT_PORT |
880 ISCSI_PERSISTENT_ADDRESS |
881 ISCSI_TARGET_NAME | ISCSI_TPGT |
882 ISCSI_USERNAME | ISCSI_PASSWORD |
883 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
884 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
885 ISCSI_LU_RESET_TMO |
886 ISCSI_PING_TMO | ISCSI_RECV_TMO |
887 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
888 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
889 ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
890 .get_host_param = cxgb3i_host_get_param,
891 .set_host_param = cxgb3i_host_set_param,
892 /* session management */
893 .create_session = cxgb3i_session_create,
894 .destroy_session = cxgb3i_session_destroy,
895 .get_session_param = iscsi_session_get_param,
896 /* connection management */
897 .create_conn = cxgb3i_conn_create,
898 .bind_conn = cxgb3i_conn_bind,
899 .destroy_conn = iscsi_tcp_conn_teardown,
900 .start_conn = iscsi_conn_start,
901 .stop_conn = iscsi_conn_stop,
902 .get_conn_param = cxgb3i_conn_get_param,
903 .set_param = cxgb3i_conn_set_param,
904 .get_stats = cxgb3i_conn_get_stats,
905 /* pdu xmit req. from user space */
906 .send_pdu = iscsi_conn_send_pdu,
907 /* task */
908 .init_task = iscsi_tcp_task_init,
909 .xmit_task = iscsi_tcp_task_xmit,
910 .cleanup_task = cxgb3i_conn_cleanup_task,
911
912 /* pdu */
913 .alloc_pdu = cxgb3i_conn_alloc_pdu,
914 .init_pdu = cxgb3i_conn_init_pdu,
915 .xmit_pdu = cxgb3i_conn_xmit_pdu,
916 .parse_pdu_itt = cxgb3i_parse_itt,
917
918 /* TCP connect/disconnect */
919 .ep_connect = cxgb3i_ep_connect,
920 .ep_poll = cxgb3i_ep_poll,
921 .ep_disconnect = cxgb3i_ep_disconnect,
922 /* Error recovery timeout call */
923 .session_recovery_timedout = iscsi_session_recovery_timedout,
924};
925
926int cxgb3i_iscsi_init(void)
927{
928 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
929 sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
930 cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
931 ISCSI_ITT_MASK, sw_tag_idx_bits,
932 ISCSI_AGE_MASK, sw_tag_age_bits);
933
934 cxgb3i_scsi_transport =
935 iscsi_register_transport(&cxgb3i_iscsi_transport);
936 if (!cxgb3i_scsi_transport) {
937 cxgb3i_log_error("Could not register cxgb3i transport.\n");
938 return -ENODEV;
939 }
940 cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
941 return 0;
942}
943
944void cxgb3i_iscsi_cleanup(void)
945{
946 if (cxgb3i_scsi_transport) {
947 cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
948 cxgb3i_scsi_transport);
949 iscsi_unregister_transport(&cxgb3i_iscsi_transport);
950 }
951}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
new file mode 100644
index 000000000000..a865f1fefe8b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -0,0 +1,1810 @@
1/*
2 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/if_vlan.h>
16#include <linux/version.h>
17
18#include "cxgb3_defs.h"
19#include "cxgb3_ctl_defs.h"
20#include "firmware_exports.h"
21#include "cxgb3i_offload.h"
22#include "cxgb3i_pdu.h"
23#include "cxgb3i_ddp.h"
24
25#ifdef __DEBUG_C3CN_CONN__
26#define c3cn_conn_debug cxgb3i_log_info
27#else
28#define c3cn_conn_debug(fmt...)
29#endif
30
31#ifdef __DEBUG_C3CN_TX__
32#define c3cn_tx_debug cxgb3i_log_debug
33#else
34#define c3cn_tx_debug(fmt...)
35#endif
36
37#ifdef __DEBUG_C3CN_RX__
38#define c3cn_rx_debug cxgb3i_log_debug
39#else
40#define c3cn_rx_debug(fmt...)
41#endif
42
43/*
44 * module parameters related to offloaded iscsi connections
45 */
46static int cxgb3_rcv_win = 256 * 1024;
47module_param(cxgb3_rcv_win, int, 0644);
48MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
49
50static int cxgb3_snd_win = 64 * 1024;
51module_param(cxgb3_snd_win, int, 0644);
52MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
53
54static int cxgb3_rx_credit_thres = 10 * 1024;
55module_param(cxgb3_rx_credit_thres, int, 0644);
56MODULE_PARM_DESC(cxgb3_rx_credit_thres,
57 "RX credits return threshold in bytes (default=10KB)");
58
59static unsigned int cxgb3_max_connect = 8 * 1024;
60module_param(cxgb3_max_connect, uint, 0644);
61MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8192)");
62
63static unsigned int cxgb3_sport_base = 20000;
64module_param(cxgb3_sport_base, uint, 0644);
65MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
66
67/*
68 * cxgb3i tcp connection data(per adapter) list
69 */
70static LIST_HEAD(cdata_list);
71static DEFINE_RWLOCK(cdata_rwlock);
72
73static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
74static void c3cn_release_offload_resources(struct s3_conn *c3cn);
75
76/*
77 * iscsi source port management
78 *
79 * Find a free source port in the port allocation map. We use a very simple
80 * rotor scheme to look for the next free port.
81 *
82 * If a source port has been specified make sure that it doesn't collide with
83 * our normal source port allocation map. If it's outside the range of our
84 * allocation/deallocation scheme just let them use it.
85 *
86 * If the source port is outside our allocation range, the caller is
87 * responsible for keeping track of their port usage.
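 *
 * e.g. with the default module parameters the rotor manages source ports
 * cxgb3_sport_base .. cxgb3_sport_base + cxgb3_max_connect - 1
 * (20000..28191).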
88 */
89static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
90{
91 unsigned int start;
92 int idx;
93
94 if (!cdata)
95 goto error_out;
96
97	if (c3cn->saddr.sin_port != 0) {
98		idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
99		if (idx < 0 || idx >= cxgb3_max_connect)
100			return 0;
101		return test_and_set_bit(idx, cdata->sport_map) ?
102			-EADDRINUSE : 0;
103	}
104
105	/* sport_map_next may not be accurate, but that is okay; sport_map
106	   should be */
107 start = idx = cdata->sport_map_next;
108 do {
109 if (++idx >= cxgb3_max_connect)
110 idx = 0;
111 if (!(test_and_set_bit(idx, cdata->sport_map))) {
112 c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
113 cdata->sport_map_next = idx;
114 c3cn_conn_debug("%s reserve port %u.\n",
115 cdata->cdev->name,
116 cxgb3_sport_base + idx);
117 return 0;
118 }
119 } while (idx != start);
120
121error_out:
122 return -EADDRNOTAVAIL;
123}
124
125static void c3cn_put_port(struct s3_conn *c3cn)
126{
127 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
128
129 if (c3cn->saddr.sin_port) {
130 int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
131
132 c3cn->saddr.sin_port = 0;
133 if (idx < 0 || idx >= cxgb3_max_connect)
134 return;
135 clear_bit(idx, cdata->sport_map);
136 c3cn_conn_debug("%s, release port %u.\n",
137 cdata->cdev->name, cxgb3_sport_base + idx);
138 }
139}
140
141static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
142{
143 __set_bit(flag, &c3cn->flags);
144 c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
145 c3cn, flag, c3cn->state, c3cn->flags);
146}
147
148static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
149{
150 __clear_bit(flag, &c3cn->flags);
151 c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
152 c3cn, flag, c3cn->state, c3cn->flags);
153}
154
155static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
156{
157 if (c3cn == NULL)
158 return 0;
159 return test_bit(flag, &c3cn->flags);
160}
161
162static void c3cn_set_state(struct s3_conn *c3cn, int state)
163{
164 c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
165 c3cn->state = state;
166}
167
168static inline void c3cn_hold(struct s3_conn *c3cn)
169{
170 atomic_inc(&c3cn->refcnt);
171}
172
173static inline void c3cn_put(struct s3_conn *c3cn)
174{
175 if (atomic_dec_and_test(&c3cn->refcnt)) {
176 c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
177 c3cn, c3cn->state, c3cn->flags);
178 kfree(c3cn);
179 }
180}
181
182static void c3cn_closed(struct s3_conn *c3cn)
183{
184 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
185 c3cn, c3cn->state, c3cn->flags);
186
187 c3cn_put_port(c3cn);
188 c3cn_release_offload_resources(c3cn);
189 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
190 cxgb3i_conn_closing(c3cn);
191}
192
193/*
194 * CPL (Chelsio Protocol Language) defines a message passing interface between
195 * the host driver and the T3 ASIC.
196 * The section below implements CPLs related to iscsi tcp connection
197 * open/close/abort and data send/receive.
198 */
199
200/*
201 * CPL connection active open request: host ->
202 */
203static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
204{
205 int i = 0;
206
207 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
208 ++i;
209 return i;
210}
211
212static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
213{
214 unsigned int idx;
215 struct dst_entry *dst = c3cn->dst_cache;
216 struct t3cdev *cdev = c3cn->cdev;
217 const struct t3c_data *td = T3C_DATA(cdev);
218 u16 advmss = dst_metric(dst, RTAX_ADVMSS);
219
220 if (advmss > pmtu - 40)
221 advmss = pmtu - 40;
222 if (advmss < td->mtus[0] - 40)
223 advmss = td->mtus[0] - 40;
224 idx = find_best_mtu(td, advmss + 40);
225 return idx;
226}
227
228static inline int compute_wscale(int win)
229{
230 int wscale = 0;
231 while (wscale < 14 && (65535<<wscale) < win)
232 wscale++;
233 return wscale;
234}
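/* e.g. a 256KB receive window needs wscale = 3, since 65535 << 2 = 262140
 * is still smaller than 262144
 */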
235
236static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
237{
238 int wscale = compute_wscale(cxgb3_rcv_win);
239 return V_KEEP_ALIVE(1) |
240 F_TCAM_BYPASS |
241 V_WND_SCALE(wscale) |
242 V_MSS_IDX(c3cn->mss_idx);
243}
244
245static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
246{
247 return V_ULP_MODE(ULP_MODE_ISCSI) |
248 V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
249}
250
251static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
252 unsigned int atid, const struct l2t_entry *e)
253{
254 struct cpl_act_open_req *req;
255
256 c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
257
258 skb->priority = CPL_PRIORITY_SETUP;
259 req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
260 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
261 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
262 req->local_port = c3cn->saddr.sin_port;
263 req->peer_port = c3cn->daddr.sin_port;
264 req->local_ip = c3cn->saddr.sin_addr.s_addr;
265 req->peer_ip = c3cn->daddr.sin_addr.s_addr;
266 req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
267 V_TX_CHANNEL(e->smt_idx));
268 req->opt0l = htonl(calc_opt0l(c3cn));
269 req->params = 0;
270}
271
272static void fail_act_open(struct s3_conn *c3cn, int errno)
273{
274 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
275 c3cn, c3cn->state, c3cn->flags);
276 c3cn->err = errno;
277 c3cn_closed(c3cn);
278}
279
280static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
281{
282 struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
283
284 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
285
286 c3cn_hold(c3cn);
287 spin_lock_bh(&c3cn->lock);
288 if (c3cn->state == C3CN_STATE_CONNECTING)
289 fail_act_open(c3cn, EHOSTUNREACH);
290 spin_unlock_bh(&c3cn->lock);
291 c3cn_put(c3cn);
292 __kfree_skb(skb);
293}
294
295/*
296 * CPL connection close request: host ->
297 *
298 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
299 * the write queue (i.e., after any unsent tx data).
300 */
301static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
302 int flags)
303{
304 CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
305 CXGB3_SKB_CB(skb)->flags = flags;
306 __skb_queue_tail(&c3cn->write_queue, skb);
307}
308
309static void send_close_req(struct s3_conn *c3cn)
310{
311 struct sk_buff *skb = c3cn->cpl_close;
312 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
313 unsigned int tid = c3cn->tid;
314
315 c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
316 c3cn, c3cn->state, c3cn->flags);
317
318 c3cn->cpl_close = NULL;
319
320 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
321 req->wr.wr_lo = htonl(V_WR_TID(tid));
322 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
323 req->rsvd = htonl(c3cn->write_seq);
324
325 skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
326 if (c3cn->state != C3CN_STATE_CONNECTING)
327 c3cn_push_tx_frames(c3cn, 1);
328}
329
330/*
331 * CPL connection abort request: host ->
332 *
333 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
334 * for the same connection and also that we do not try to send a message
335 * after the connection has closed.
336 */
337static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
338{
339 struct cpl_abort_req *req = cplhdr(skb);
340
341 c3cn_conn_debug("tdev 0x%p.\n", cdev);
342
343 req->cmd = CPL_ABORT_NO_RST;
344 cxgb3_ofld_send(cdev, skb);
345}
346
347static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
348{
349 struct sk_buff *skb;
350
351 while ((skb = __skb_dequeue(&c3cn->write_queue)))
352 __kfree_skb(skb);
353}
354
355static void send_abort_req(struct s3_conn *c3cn)
356{
357 struct sk_buff *skb = c3cn->cpl_abort_req;
358 struct cpl_abort_req *req;
359 unsigned int tid = c3cn->tid;
360
361 if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
362 !c3cn->cdev)
363 return;
364
365 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
366
367 c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
368
369 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
370
371 /* Purge the send queue so we don't send anything after an abort. */
372 c3cn_purge_write_queue(c3cn);
373
374 c3cn->cpl_abort_req = NULL;
375 req = (struct cpl_abort_req *)skb->head;
376
377 skb->priority = CPL_PRIORITY_DATA;
378 set_arp_failure_handler(skb, abort_arp_failure);
379
380 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
381 req->wr.wr_lo = htonl(V_WR_TID(tid));
382 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
383 req->rsvd0 = htonl(c3cn->snd_nxt);
384 req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
385 req->cmd = CPL_ABORT_SEND_RST;
386
387 l2t_send(c3cn->cdev, skb, c3cn->l2t);
388}
389
390/*
391 * CPL connection abort reply: host ->
392 *
393 * Send an ABORT_RPL message in response to the received ABORT_REQ.
394 */
395static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
396{
397 struct sk_buff *skb = c3cn->cpl_abort_rpl;
398 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
399
400 c3cn->cpl_abort_rpl = NULL;
401
402 skb->priority = CPL_PRIORITY_DATA;
403 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
404 rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
405 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
406 rpl->cmd = rst_status;
407
408 cxgb3_ofld_send(c3cn->cdev, skb);
409}
410
411/*
412 * CPL connection rx data ack: host ->
413 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
414 * credits sent.
415 */
416static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
417{
418 struct sk_buff *skb;
419 struct cpl_rx_data_ack *req;
420
421 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
422 if (!skb)
423 return 0;
424
425 req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
426 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
427 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
428 req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
429 skb->priority = CPL_PRIORITY_ACK;
430 cxgb3_ofld_send(c3cn->cdev, skb);
431 return credits;
432}
433
434/*
435 * CPL connection tx data: host ->
436 *
437 * Send iscsi PDUs via TX_DATA CPL messages. Returns the number of
438 * credits sent.
439 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
440 * of how many we've used so far and how many are pending (i.e., not yet
441 * acked by T3).
441 */
442
443/*
444 * For ULP connections the HW may insert digest bytes into the pdu. Those digest
445 * bytes are not sent by the host but are part of the TCP payload and therefore
446 * consume TCP sequence space.
447 */
448static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
449static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
450{
451 return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
452}
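/* e.g. enabling one digest (submode 1 or 2) adds a 4-byte CRC to the TCP
 * payload; enabling both (submode 3) adds 8 bytes
 */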
453
454static unsigned int wrlen __read_mostly;
455
456/*
457 * The number of WRs needed for an skb depends on the number of fragments
458 * in the skb and whether it has any payload in its main body. This maps the
459 * length of the gather list represented by an skb into the # of necessary WRs.
460 *
461 * The max. length of an skb is controlled by the max pdu size which is ~16K.
462 * Also, assume the min. fragment length is the sector size (512), then add
463 * extra fragment counts for iscsi bhs and payload padding.
464 */
465#define SKB_WR_LIST_SIZE (16384/512 + 3)
466static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
467
468static void s3_init_wr_tab(unsigned int wr_len)
469{
470 int i;
471
472 if (skb_wrs[1]) /* already initialized */
473 return;
474
475 for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
476 int sgl_len = (3 * i) / 2 + (i & 1);
477
478 sgl_len += 3;
479 skb_wrs[i] = (sgl_len <= wr_len
480 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
481 }
482
483 wrlen = wr_len * 8;
484}
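/*
 * e.g. a 1-fragment skb gives sgl_len = (3 * 1) / 2 + (1 & 1) + 3 = 5, so
 * one WR suffices whenever wr_len >= 5; larger gather lists need
 * 1 + (sgl_len - 2) / (wr_len - 1) WRs.
 */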
485
486static inline void reset_wr_list(struct s3_conn *c3cn)
487{
488 c3cn->wr_pending_head = NULL;
489}
490
491/*
492 * Add a WR to a connections's list of pending WRs. This is a singly-linked
493 * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
494 * and the tail in wr_pending_tail.
495 */
496static inline void enqueue_wr(struct s3_conn *c3cn,
497 struct sk_buff *skb)
498{
499 skb_wr_data(skb) = NULL;
500
501 /*
502 * We want to take an extra reference since both us and the driver
503 * need to free the packet before it's really freed. We know there's
504 * just one user currently so we use atomic_set rather than skb_get
505 * to avoid the atomic op.
506 */
507 atomic_set(&skb->users, 2);
508
509 if (!c3cn->wr_pending_head)
510 c3cn->wr_pending_head = skb;
511 else
512		skb_wr_data(c3cn->wr_pending_tail) = skb;
513 c3cn->wr_pending_tail = skb;
514}
515
516static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
517{
518 return c3cn->wr_pending_head;
519}
520
521static inline void free_wr_skb(struct sk_buff *skb)
522{
523 kfree_skb(skb);
524}
525
526static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
527{
528 struct sk_buff *skb = c3cn->wr_pending_head;
529
530 if (likely(skb)) {
531 /* Don't bother clearing the tail */
532 c3cn->wr_pending_head = skb_wr_data(skb);
533 skb_wr_data(skb) = NULL;
534 }
535 return skb;
536}
537
538static void purge_wr_queue(struct s3_conn *c3cn)
539{
540 struct sk_buff *skb;
541 while ((skb = dequeue_wr(c3cn)) != NULL)
542 free_wr_skb(skb);
543}
544
545static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
546 int len)
547{
548 struct tx_data_wr *req;
549
550 skb_reset_transport_header(skb);
551 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
552 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
553 req->wr_lo = htonl(V_WR_TID(c3cn->tid));
554 req->sndseq = htonl(c3cn->snd_nxt);
555 /* len includes the length of any HW ULP additions */
556 req->len = htonl(len);
557 req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
558 /* V_TX_ULP_SUBMODE sets both the mode and submode */
559 req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
560 V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));
561
562 if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
563 req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
564 V_TX_CPU_IDX(c3cn->qset));
565 /* Sendbuffer is in units of 32KB. */
566 req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
567 c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
568 }
569}
570
571static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
572{
573	kfree_skb(skb);
574}
575
576/**
577 * c3cn_push_tx_frames -- start transmit
578 * @c3cn: the offloaded connection
579 * @req_completion: request wr_ack or not
580 *
581 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
582 * connection's send queue and sends them on to T3. Must be called with the
583 * connection's lock held. Returns the amount of send buffer space that was
584 * freed as a result of sending queued data to T3.
585 */
586static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
587{
588 int total_size = 0;
589 struct sk_buff *skb;
590 struct t3cdev *cdev;
591 struct cxgb3i_sdev_data *cdata;
592
593 if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
594 c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
595 c3cn->state == C3CN_STATE_ABORTING)) {
596 c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
597 c3cn, c3cn->state);
598 return 0;
599 }
600
601 cdev = c3cn->cdev;
602 cdata = CXGB3_SDEV_DATA(cdev);
603
604 while (c3cn->wr_avail
605 && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
606 int len = skb->len; /* length before skb_push */
607 int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
608 int wrs_needed = skb_wrs[frags];
609
610 if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
611 wrs_needed = 1;
612
613 WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
614
615 if (c3cn->wr_avail < wrs_needed) {
616 c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
617 "wr %d < %u.\n",
618				      c3cn, skb->len, skb->data_len, frags,
619 wrs_needed, c3cn->wr_avail);
620 break;
621 }
622
623 __skb_unlink(skb, &c3cn->write_queue);
624 skb->priority = CPL_PRIORITY_DATA;
625 skb->csum = wrs_needed; /* remember this until the WR_ACK */
626 c3cn->wr_avail -= wrs_needed;
627 c3cn->wr_unacked += wrs_needed;
628 enqueue_wr(c3cn, skb);
629
630 if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
631 len += ulp_extra_len(skb);
632 make_tx_data_wr(c3cn, skb, len);
633 c3cn->snd_nxt += len;
634 if ((req_completion
635 && c3cn->wr_unacked == wrs_needed)
636 || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
637 || c3cn->wr_unacked >= c3cn->wr_max / 2) {
638 struct work_request_hdr *wr = cplhdr(skb);
639
640 wr->wr_hi |= htonl(F_WR_COMPL);
641 c3cn->wr_unacked = 0;
642 }
643 CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
644 }
645
646 total_size += skb->truesize;
647 set_arp_failure_handler(skb, arp_failure_discard);
648 l2t_send(cdev, skb, c3cn->l2t);
649 }
650 return total_size;
651}
652
653/*
654 * process_cpl_msg: -> host
655 * Top-level CPL message processing used by most CPL messages that
656 * pertain to connections.
657 */
658static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
659 struct sk_buff *),
660 struct s3_conn *c3cn,
661 struct sk_buff *skb)
662{
663 spin_lock_bh(&c3cn->lock);
664 fn(c3cn, skb);
665 spin_unlock_bh(&c3cn->lock);
666}
667
668/*
669 * process_cpl_msg_ref: -> host
670 * Similar to process_cpl_msg() but takes an extra connection reference around
671 * the call to the handler. Should be used if the handler may drop a
672 * connection reference.
673 */
674static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
675 struct sk_buff *),
676 struct s3_conn *c3cn,
677 struct sk_buff *skb)
678{
679 c3cn_hold(c3cn);
680 process_cpl_msg(fn, c3cn, skb);
681 c3cn_put(c3cn);
682}
683
684/*
685 * Process a CPL_ACT_ESTABLISH message: -> host
686 * Updates connection state from an active establish CPL message. Runs with
687 * the connection lock held.
688 */
689
690static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
691{
692 struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
693 if (c3cn)
694 c3cn_put(c3cn);
695}
696
697static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
698 unsigned int opt)
699{
700 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
701
702 c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
703
704 /*
705 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
706 * pass through opt0.
707 */
708 if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
709 c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
710
711 dst_confirm(c3cn->dst_cache);
712
713 smp_mb();
714
715 c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
716}
717
718static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
719{
720 struct cpl_act_establish *req = cplhdr(skb);
721 u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
722
723 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
724 c3cn, c3cn->state, c3cn->flags);
725
726 if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
727 cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
728 c3cn->tid, c3cn->state);
729
730 c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
731 c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
732
733 __kfree_skb(skb);
734
735 if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
736 /* upper layer has requested closing */
737 send_abort_req(c3cn);
738 else if (c3cn_push_tx_frames(c3cn, 1))
739 cxgb3i_conn_tx_open(c3cn);
740}
741
742static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
743 void *ctx)
744{
745 struct cpl_act_establish *req = cplhdr(skb);
746 unsigned int tid = GET_TID(req);
747 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
748 struct s3_conn *c3cn = ctx;
749 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
750
751 c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
752 tid, c3cn, c3cn->state, c3cn->flags);
753
754 c3cn->tid = tid;
755 c3cn_hold(c3cn);
756 cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
757 s3_free_atid(cdev, atid);
758
759 c3cn->qset = G_QNUM(ntohl(skb->csum));
760
761 process_cpl_msg(process_act_establish, c3cn, skb);
762 return 0;
763}
764
765/*
766 * Process a CPL_ACT_OPEN_RPL message: -> host
767 * Handle active open failures.
768 */
769static int act_open_rpl_status_to_errno(int status)
770{
771 switch (status) {
772 case CPL_ERR_CONN_RESET:
773 return ECONNREFUSED;
774 case CPL_ERR_ARP_MISS:
775 return EHOSTUNREACH;
776 case CPL_ERR_CONN_TIMEDOUT:
777 return ETIMEDOUT;
778 case CPL_ERR_TCAM_FULL:
779 return ENOMEM;
780 case CPL_ERR_CONN_EXIST:
781 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
782 return EADDRINUSE;
783 default:
784 return EIO;
785 }
786}
787
788static void act_open_retry_timer(unsigned long data)
789{
790 struct sk_buff *skb;
791 struct s3_conn *c3cn = (struct s3_conn *)data;
792
793 c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
794
795 spin_lock_bh(&c3cn->lock);
796 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
797 if (!skb)
798 fail_act_open(c3cn, ENOMEM);
799 else {
800 skb->sk = (struct sock *)c3cn;
801 set_arp_failure_handler(skb, act_open_req_arp_failure);
802 make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
803 l2t_send(c3cn->cdev, skb, c3cn->l2t);
804 }
805 spin_unlock_bh(&c3cn->lock);
806 c3cn_put(c3cn);
807}
808
809static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
810{
811 struct cpl_act_open_rpl *rpl = cplhdr(skb);
812
813 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
814 c3cn, c3cn->state, c3cn->flags);
815
816 if (rpl->status == CPL_ERR_CONN_EXIST &&
817 c3cn->retry_timer.function != act_open_retry_timer) {
818 c3cn->retry_timer.function = act_open_retry_timer;
819 if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
820 c3cn_hold(c3cn);
821 } else
822 fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
823 __kfree_skb(skb);
824}
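/*
 * Reference-count note for the retry path above: mod_timer() returns 0 only
 * when the timer was not already pending, so the c3cn_hold() is taken at
 * most once per armed retry and is balanced by the c3cn_put() at the end of
 * act_open_retry_timer().
 */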
825
826static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
827{
828 struct s3_conn *c3cn = ctx;
829 struct cpl_act_open_rpl *rpl = cplhdr(skb);
830
831 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
832 rpl->status, c3cn, c3cn->state, c3cn->flags);
833
834 if (rpl->status != CPL_ERR_TCAM_FULL &&
835 rpl->status != CPL_ERR_CONN_EXIST &&
836 rpl->status != CPL_ERR_ARP_MISS)
837 cxgb3_queue_tid_release(cdev, GET_TID(rpl));
838
839 process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
840 return 0;
841}
842
843/*
844 * Process PEER_CLOSE CPL messages: -> host
845 * Handle peer FIN.
846 */
847static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
848{
849 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
850 c3cn, c3cn->state, c3cn->flags);
851
852 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
853 goto out;
854
855 switch (c3cn->state) {
856 case C3CN_STATE_ESTABLISHED:
857 c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
858 break;
859 case C3CN_STATE_ACTIVE_CLOSE:
860 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
861 break;
862 case C3CN_STATE_CLOSE_WAIT_1:
863 c3cn_closed(c3cn);
864 break;
865 case C3CN_STATE_ABORTING:
866 break;
867 default:
868 cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
869 c3cn->cdev->name, c3cn->tid, c3cn->state);
870 }
871
872 cxgb3i_conn_closing(c3cn);
873out:
874 __kfree_skb(skb);
875}
876
877static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
878{
879 struct s3_conn *c3cn = ctx;
880
881 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
882 c3cn, c3cn->state, c3cn->flags);
883 process_cpl_msg_ref(process_peer_close, c3cn, skb);
884 return 0;
885}
886
887/*
888 * Process CLOSE_CONN_RPL CPL message: -> host
889 * Process a peer ACK to our FIN.
890 */
891static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
892{
893 struct cpl_close_con_rpl *rpl = cplhdr(skb);
894
895 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
896 c3cn, c3cn->state, c3cn->flags);
897
898 c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
899
900 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
901 goto out;
902
903 switch (c3cn->state) {
904 case C3CN_STATE_ACTIVE_CLOSE:
905 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
906 break;
907 case C3CN_STATE_CLOSE_WAIT_1:
908 case C3CN_STATE_CLOSE_WAIT_2:
909 c3cn_closed(c3cn);
910 break;
911 case C3CN_STATE_ABORTING:
912 break;
913 default:
914 cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
915 c3cn->cdev->name, c3cn->tid, c3cn->state);
916 }
917
918out:
919 kfree_skb(skb);
920}
921
922static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
923 void *ctx)
924{
925 struct s3_conn *c3cn = ctx;
926
927 c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
928 c3cn, c3cn->state, c3cn->flags);
929
930 process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
931 return 0;
932}
933
934/*
935 * Process ABORT_REQ_RSS CPL message: -> host
936 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
937 * request except that we need to reply to it.
938 */
939
940static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
941 int *need_rst)
942{
943 switch (abort_reason) {
944 case CPL_ERR_BAD_SYN: /* fall through */
945 case CPL_ERR_CONN_RESET:
946 return c3cn->state > C3CN_STATE_ESTABLISHED ?
947 EPIPE : ECONNRESET;
948 case CPL_ERR_XMIT_TIMEDOUT:
949 case CPL_ERR_PERSIST_TIMEDOUT:
950 case CPL_ERR_FINWAIT2_TIMEDOUT:
951 case CPL_ERR_KEEPALIVE_TIMEDOUT:
952 return ETIMEDOUT;
953 default:
954 return EIO;
955 }
956}
957
958static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
959{
960 int rst_status = CPL_ABORT_NO_RST;
961 const struct cpl_abort_req_rss *req = cplhdr(skb);
962
963 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
964 c3cn, c3cn->state, c3cn->flags);
965
966 if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
967 c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
968 c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
969 __kfree_skb(skb);
970 return;
971 }
972
973 c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
974 send_abort_rpl(c3cn, rst_status);
975
976 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
977 c3cn->err =
978 abort_status_to_errno(c3cn, req->status, &rst_status);
979 c3cn_closed(c3cn);
980 }
981}
982
983static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
984{
985 const struct cpl_abort_req_rss *req = cplhdr(skb);
986 struct s3_conn *c3cn = ctx;
987
988 c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
989 c3cn, c3cn->state, c3cn->flags);
990
991 if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
992 req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
993 __kfree_skb(skb);
994 return 0;
995 }
996
997 process_cpl_msg_ref(process_abort_req, c3cn, skb);
998 return 0;
999}
1000
1001/*
1002 * Process ABORT_RPL_RSS CPL message: -> host
1003 * Process abort replies. We only process these messages if we anticipate
1004 * them as the coordination between SW and HW in this area is somewhat lacking
1005 * and sometimes we get ABORT_RPLs after we are done with the connection that
1006 * originated the ABORT_REQ.
1007 */
1008static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
1009{
1010 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1011 c3cn, c3cn->state, c3cn->flags);
1012
1013 if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
1014 if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
1015 c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1016 else {
1017 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
1018 c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
1019 if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
1020 cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
1021 c3cn->cdev->name, c3cn->tid);
1022 c3cn_closed(c3cn);
1023 }
1024 }
1025 __kfree_skb(skb);
1026}
1027
1028static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1029{
1030 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1031 struct s3_conn *c3cn = ctx;
1032
1033 c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
1034 rpl->status, c3cn, c3cn ? c3cn->state : 0,
1035 c3cn ? c3cn->flags : 0UL);
1036
1037 /*
1038 * Ignore replies to post-close aborts indicating that the abort was
1039 * requested too late. These connections are terminated when we get
1040 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
1041 * arrives the TID is either no longer used or it has been recycled.
1042 */
1043 if (rpl->status == CPL_ERR_ABORT_FAILED)
1044 goto discard;
1045
1046 /*
1047 * Sometimes we've already closed the connection, e.g., a post-close
1048 * abort races with ABORT_REQ_RSS, the latter frees the connection
1049 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
1050 * but FW turns the ABORT_REQ into a regular one and so we get
1051 * ABORT_RPL_RSS with status 0 and no connection.
1052 */
1053 if (!c3cn)
1054 goto discard;
1055
1056 process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
1057 return 0;
1058
1059discard:
1060 __kfree_skb(skb);
1061 return 0;
1062}
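/*
 * Teardown bookkeeping, as implemented above: with ABORT_RPL_PENDING set,
 * the first ABORT_RPL_RSS only latches ABORT_RPL_RCVD; a second reply
 * clears both flags and calls c3cn_closed().  This appears intended to
 * tolerate a locally initiated abort crossing one from the wire, where two
 * replies can arrive for the same connection.
 */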
1063
1064/*
1065 * Process RX_ISCSI_HDR CPL message: -> host
1066 * Handle received PDUs; the payload may have been DDP'ed. If not, the
1067 * payload follows the BHS.
1068 */
1069static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
1070{
1071 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
1072 struct cpl_iscsi_hdr_norss data_cpl;
1073 struct cpl_rx_data_ddp_norss ddp_cpl;
1074 unsigned int hdr_len, data_len, status;
1075 unsigned int len;
1076 int err;
1077
1078 if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
1079 if (c3cn->state != C3CN_STATE_ABORTING)
1080 send_abort_req(c3cn);
1081 __kfree_skb(skb);
1082 return;
1083 }
1084
1085 CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
1086 CXGB3_SKB_CB(skb)->flags = 0;
1087
1088 skb_reset_transport_header(skb);
1089 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
1090
1091 len = hdr_len = ntohs(hdr_cpl->len);
1092 /* msg coalesce is off or not enough data received */
1093 if (skb->len <= hdr_len) {
1094 cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
1095 c3cn->cdev->name, c3cn->tid,
1096 skb->len, hdr_len);
1097 goto abort_conn;
1098 }
1099
1100 err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
1101 sizeof(ddp_cpl));
1102 if (err < 0)
1103 goto abort_conn;
1104
1105 skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
1106 skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
1107 skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
1108 status = ntohl(ddp_cpl.ddp_status);
1109
1110 c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
1111 skb, skb->len, skb_ulp_pdulen(skb), status);
1112
1113 if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
1114 skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
1115 if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
1116 skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
1117 if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
1118 skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;
1119
1120 if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
1121 err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
1122 if (err < 0)
1123 goto abort_conn;
1124 data_len = ntohs(data_cpl.len);
1125 len += sizeof(data_cpl) + data_len;
1126 } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
1127 skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
1128
1129 c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
1130 __pskb_trim(skb, len);
1131 __skb_queue_tail(&c3cn->receive_queue, skb);
1132 cxgb3i_conn_pdu_ready(c3cn);
1133
1134 return;
1135
1136abort_conn:
1137 send_abort_req(c3cn);
1138 __kfree_skb(skb);
1139}
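/*
 * Sketch of the coalesced message parsed above, as delivered (before the
 * __skb_pull); in the non-DDP'ed case a cpl_iscsi_hdr_norss copy sits
 * between the BHS and the payload, and the trailing cpl_rx_data_ddp_norss
 * carries the pdu length, crc and ddp status:
 *
 *  +---------------+-----+---------------------+---------+-------------------+
 *  | cpl_iscsi_hdr | BHS | cpl_iscsi_hdr_norss | payload | rx_data_ddp_norss |
 *  +---------------+-----+---------------------+---------+-------------------+
 */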
1140
1141static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
1142{
1143 struct s3_conn *c3cn = ctx;
1144
1145 process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
1146 return 0;
1147}
1148
1149/*
1150 * Process TX_DATA_ACK CPL messages: -> host
1151 * Process an acknowledgment of WR completion. Advance snd_una and send the
1152 * next batch of work requests from the write queue.
1153 */
1154static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
1155{
1156 struct cpl_wr_ack *hdr = cplhdr(skb);
1157 unsigned int credits = ntohs(hdr->credits);
1158 u32 snd_una = ntohl(hdr->snd_una);
1159
1160 c3cn->wr_avail += credits;
1161 if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
1162 c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
1163
1164 while (credits) {
1165 struct sk_buff *p = peek_wr(c3cn);
1166
1167 if (unlikely(!p)) {
1168 cxgb3i_log_error("%u WR_ACK credits for TID %u with "
1169 "nothing pending, state %u\n",
1170 credits, c3cn->tid, c3cn->state);
1171 break;
1172 }
1173 if (unlikely(credits < p->csum)) {
1174 p->csum -= credits;
1175 break;
1176 } else {
1177 dequeue_wr(c3cn);
1178 credits -= p->csum;
1179 free_wr_skb(p);
1180 }
1181 }
1182
1183 if (unlikely(before(snd_una, c3cn->snd_una)))
1184 goto out_free;
1185
1186 if (c3cn->snd_una != snd_una) {
1187 c3cn->snd_una = snd_una;
1188 dst_confirm(c3cn->dst_cache);
1189 }
1190
1191 if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
1192 cxgb3i_conn_tx_open(c3cn);
1193out_free:
1194 __kfree_skb(skb);
1195}
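/*
 * Note: for skbs queued on the pending-WR list, skb->csum is repurposed to
 * hold the number of WR credits that skb consumed; the loop above retires
 * whole WRs against the returned credit count and leaves any partial
 * remainder in the head skb.
 */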
1196
1197static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1198{
1199 struct s3_conn *c3cn = ctx;
1200
1201 process_cpl_msg(process_wr_ack, c3cn, skb);
1202 return 0;
1203}
1204
1205/*
1206 * For each connection, pre-allocate the skbs needed for close/abort
1207 * requests, so that those requests can be serviced right away.
1208 */
1209static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
1210{
1211 if (c3cn->cpl_close)
1212 kfree_skb(c3cn->cpl_close);
1213 if (c3cn->cpl_abort_req)
1214 kfree_skb(c3cn->cpl_abort_req);
1215 if (c3cn->cpl_abort_rpl)
1216 kfree_skb(c3cn->cpl_abort_rpl);
1217}
1218
1219static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
1220{
1221 c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
1222 GFP_KERNEL);
1223 if (!c3cn->cpl_close)
1224 return -ENOMEM;
1225 skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
1226
1227 c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
1228 GFP_KERNEL);
1229 if (!c3cn->cpl_abort_req)
1230 goto free_cpl_skbs;
1231 skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
1232
1233 c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
1234 GFP_KERNEL);
1235 if (!c3cn->cpl_abort_rpl)
1236 goto free_cpl_skbs;
1237 skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
1238
1239 return 0;
1240
1241free_cpl_skbs:
1242 c3cn_free_cpl_skbs(c3cn);
1243 return -ENOMEM;
1244}
1245
1246/**
1247 * c3cn_release_offload_resources - release offload resources
1248 * @c3cn: the offloaded iscsi tcp connection.
1249 * Release resources held by an offload connection (TID, L2T entry, etc.)
1250 */
1251static void c3cn_release_offload_resources(struct s3_conn *c3cn)
1252{
1253 struct t3cdev *cdev = c3cn->cdev;
1254 unsigned int tid = c3cn->tid;
1255
1256 if (!cdev)
1257 return;
1258
1259 c3cn->qset = 0;
1260
1261 c3cn_free_cpl_skbs(c3cn);
1262
1263 if (c3cn->wr_avail != c3cn->wr_max) {
1264 purge_wr_queue(c3cn);
1265 reset_wr_list(c3cn);
1266 }
1267
1268 if (c3cn->l2t) {
1269 l2t_release(L2DATA(cdev), c3cn->l2t);
1270 c3cn->l2t = NULL;
1271 }
1272
1273 if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
1274 s3_free_atid(cdev, tid);
1275 else { /* we have TID */
1276 cxgb3_remove_tid(cdev, (void *)c3cn, tid);
1277 c3cn_put(c3cn);
1278 }
1279
1280 c3cn->cdev = NULL;
1281}
1282
1283/**
1284 * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
1285 * Returns the newly allocated s3_conn structure.
1286 */
1287struct s3_conn *cxgb3i_c3cn_create(void)
1288{
1289 struct s3_conn *c3cn;
1290
1291 c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
1292 if (!c3cn)
1293 return NULL;
1294
1295 /* pre-allocate close/abort cpl, so we don't need to wait for memory
1296 when close/abort is requested. */
1297 if (c3cn_alloc_cpl_skbs(c3cn) < 0)
1298 goto free_c3cn;
1299
1300 c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
1301
1302 c3cn->flags = 0;
1303 spin_lock_init(&c3cn->lock);
1304 atomic_set(&c3cn->refcnt, 1);
1305 skb_queue_head_init(&c3cn->receive_queue);
1306 skb_queue_head_init(&c3cn->write_queue);
1307 setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
1308 rwlock_init(&c3cn->callback_lock);
1309
1310 return c3cn;
1311
1312free_c3cn:
1313 kfree(c3cn);
1314 return NULL;
1315}
1316
1317static void c3cn_active_close(struct s3_conn *c3cn)
1318{
1319 int data_lost;
1320 int close_req = 0;
1321
1322 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1323 c3cn, c3cn->state, c3cn->flags);
1324
1325 dst_confirm(c3cn->dst_cache);
1326
1327 c3cn_hold(c3cn);
1328 spin_lock_bh(&c3cn->lock);
1329
1330 data_lost = skb_queue_len(&c3cn->receive_queue);
1331 __skb_queue_purge(&c3cn->receive_queue);
1332
1333 switch (c3cn->state) {
1334 case C3CN_STATE_CLOSED:
1335 case C3CN_STATE_ACTIVE_CLOSE:
1336 case C3CN_STATE_CLOSE_WAIT_1:
1337 case C3CN_STATE_CLOSE_WAIT_2:
1338 case C3CN_STATE_ABORTING:
1339 /* nothing needs to be done */
1340 break;
1341 case C3CN_STATE_CONNECTING:
1342 /* defer until cpl_act_open_rpl or cpl_act_establish */
1343 c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
1344 break;
1345 case C3CN_STATE_ESTABLISHED:
1346 close_req = 1;
1347 c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
1348 break;
1349 case C3CN_STATE_PASSIVE_CLOSE:
1350 close_req = 1;
1351 c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
1352 break;
1353 }
1354
1355 if (close_req) {
1356 if (data_lost)
1357 /* Unread data was tossed, zap the connection. */
1358 send_abort_req(c3cn);
1359 else
1360 send_close_req(c3cn);
1361 }
1362
1363 spin_unlock_bh(&c3cn->lock);
1364 c3cn_put(c3cn);
1365}
1366
1367/**
1368 * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
1369 * resources held
1370 * @c3cn: the iscsi tcp connection
1371 */
1372void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1373{
1374 c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
1375 c3cn, c3cn->state, c3cn->flags);
1376 if (likely(c3cn->state != C3CN_STATE_CONNECTING))
1377 c3cn_active_close(c3cn);
1378 else
1379 c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
1380 c3cn_put(c3cn);
1381}
1382
1383static int is_cxgb3_dev(struct net_device *dev)
1384{
1385 struct cxgb3i_sdev_data *cdata;
1386
1387 write_lock(&cdata_rwlock);
1388 list_for_each_entry(cdata, &cdata_list, list) {
1389 struct adap_ports *ports = &cdata->ports;
1390 int i;
1391
1392 for (i = 0; i < ports->nports; i++)
1393 if (dev == ports->lldevs[i]) {
1394 write_unlock(&cdata_rwlock);
1395 return 1;
1396 }
1397 }
1398 write_unlock(&cdata_rwlock);
1399 return 0;
1400}
1401
1402/**
1403 * cxgb3_egress_dev - return the cxgb3 egress device
1404 * @root_dev: the root device anchoring the search
1405 * @c3cn: the connection used to determine egress port in bonding mode
1406 * @context: in bonding mode, indicates a connection set up or failover
1407 *
1408 * Return egress device or NULL if the egress device isn't one of our ports.
1409 */
1410static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1411 struct s3_conn *c3cn,
1412 int context)
1413{
1414 while (root_dev) {
1415 if (root_dev->priv_flags & IFF_802_1Q_VLAN)
1416 root_dev = vlan_dev_real_dev(root_dev);
1417 else if (is_cxgb3_dev(root_dev))
1418 return root_dev;
1419 else
1420 return NULL;
1421 }
1422 return NULL;
1423}
1424
1425static struct rtable *find_route(__be32 saddr, __be32 daddr,
1426 __be16 sport, __be16 dport)
1427{
1428 struct rtable *rt;
1429 struct flowi fl = {
1430 .oif = 0,
1431 .nl_u = {
1432 .ip4_u = {
1433 .daddr = daddr,
1434 .saddr = saddr,
1435 .tos = 0 } },
1436 .proto = IPPROTO_TCP,
1437 .uli_u = {
1438 .ports = {
1439 .sport = sport,
1440 .dport = dport } } };
1441
1442 if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
1443 return NULL;
1444 return rt;
1445}
1446
1447/*
1448 * Initialize the offload-related connection fields: the WR credit limits
1449 * and the MSS table index.
1449 */
1450static void init_offload_conn(struct s3_conn *c3cn,
1451 struct t3cdev *cdev,
1452 struct dst_entry *dst)
1453{
1454 BUG_ON(c3cn->cdev != cdev);
1455 c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
1456 c3cn->wr_unacked = 0;
1457 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
1458
1459 reset_wr_list(c3cn);
1460}
1461
1462static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
1463{
1464 struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
1465 struct t3cdev *cdev = cdata->cdev;
1466 struct dst_entry *dst = c3cn->dst_cache;
1467 struct sk_buff *skb;
1468
1469 c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1470 c3cn, c3cn->state, c3cn->flags);
1471 /*
1472 * Initialize connection data. Note that the flags and ULP mode are
1473 * initialized higher up ...
1474 */
1475 c3cn->dev = dev;
1476 c3cn->cdev = cdev;
1477 c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
1478 if (c3cn->tid < 0)
1479 goto out_err;
1480
1481 c3cn->qset = 0;
1482 c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
1483 if (!c3cn->l2t)
1484 goto free_tid;
1485
1486 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
1487 if (!skb)
1488 goto free_l2t;
1489
1490 skb->sk = (struct sock *)c3cn;
1491 set_arp_failure_handler(skb, act_open_req_arp_failure);
1492
1493 c3cn_hold(c3cn);
1494
1495 init_offload_conn(c3cn, cdev, dst);
1496 c3cn->err = 0;
1497
1498 make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
1499 l2t_send(cdev, skb, c3cn->l2t);
1500 return 0;
1501
1502free_l2t:
1503 l2t_release(L2DATA(cdev), c3cn->l2t);
1504free_tid:
1505 s3_free_atid(cdev, c3cn->tid);
1506 c3cn->tid = 0;
1507out_err:
1508 return -1;
1509}
1510
1511
1512/**
1513 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
1514 * @c3cn: the iscsi tcp connection
1515 * @usin: destination address
1516 *
1517 * return 0 if active open request is sent, < 0 otherwise.
1518 */
1519int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
1520{
1521 struct rtable *rt;
1522 struct net_device *dev;
1523 struct cxgb3i_sdev_data *cdata;
1524 struct t3cdev *cdev;
1525 __be32 sipv4;
1526 int err;
1527
1528 if (usin->sin_family != AF_INET)
1529 return -EAFNOSUPPORT;
1530
1531 c3cn->daddr.sin_port = usin->sin_port;
1532 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1533
1534 rt = find_route(c3cn->saddr.sin_addr.s_addr,
1535 c3cn->daddr.sin_addr.s_addr,
1536 c3cn->saddr.sin_port,
1537 c3cn->daddr.sin_port);
1538 if (rt == NULL) {
1539 c3cn_conn_debug("NO route to 0x%x, port %u.\n",
1540 c3cn->daddr.sin_addr.s_addr,
1541 ntohs(c3cn->daddr.sin_port));
1542 return -ENETUNREACH;
1543 }
1544
1545 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1546 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
1547 c3cn->daddr.sin_addr.s_addr,
1548 ntohs(c3cn->daddr.sin_port));
1549 ip_rt_put(rt);
1550 return -ENETUNREACH;
1551 }
1552
1553 if (!c3cn->saddr.sin_addr.s_addr)
1554 c3cn->saddr.sin_addr.s_addr = rt->rt_src;
1555
1556 /* now commit destination to connection */
1557 c3cn->dst_cache = &rt->u.dst;
1558
1559 /* try to establish an offloaded connection */
1560 dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
1561 if (dev == NULL) {
1562 c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
1563 return -ENETUNREACH;
1564 }
1565 cdata = NDEV2CDATA(dev);
1566 cdev = cdata->cdev;
1567
1568 /* get a source port if one hasn't been provided */
1569 err = c3cn_get_port(c3cn, cdata);
1570 if (err)
1571 return err;
1572
1573 c3cn_conn_debug("c3cn 0x%p get port %u.\n",
1574 c3cn, ntohs(c3cn->saddr.sin_port));
1575
1576 sipv4 = cxgb3i_get_private_ipv4addr(dev);
1577 if (!sipv4) {
1578 c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
1579 sipv4 = c3cn->saddr.sin_addr.s_addr;
1580 cxgb3i_set_private_ipv4addr(dev, sipv4);
1581 } else
1582 c3cn->saddr.sin_addr.s_addr = sipv4;
1583
1584 c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
1585 c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
1586 ntohs(c3cn->saddr.sin_port),
1587 NIPQUAD(c3cn->daddr.sin_addr.s_addr),
1588 ntohs(c3cn->daddr.sin_port));
1589
1590 c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
1591 if (!initiate_act_open(c3cn, dev))
1592 return 0;
1593
1594 /*
1595 * If we get here, we don't have an offload connection so simply
1596 * return a failure.
1597 */
1598 err = -ENOTSUPP;
1599
1600 /*
1601 * This trashes the connection and releases the local port,
1602 * if necessary.
1603 */
1604 c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
1605 c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
1606 ip_rt_put(rt);
1607 c3cn_put_port(c3cn);
1608 c3cn->daddr.sin_port = 0;
1609 return err;
1610}
1611
1612/**
1613 * cxgb3i_c3cn_rx_credits - ack received tcp data.
1614 * @c3cn: iscsi tcp connection
1615 * @copied: # of bytes processed
1616 *
1617 * Called after some received data has been read. It returns RX credits
1618 * to the HW for the amount of data processed.
1619 */
1620void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
1621{
1622 struct t3cdev *cdev;
1623 int must_send;
1624 u32 credits, dack = 0;
1625
1626 if (c3cn->state != C3CN_STATE_ESTABLISHED)
1627 return;
1628
1629 credits = c3cn->copied_seq - c3cn->rcv_wup;
1630 if (unlikely(!credits))
1631 return;
1632
1633 cdev = c3cn->cdev;
1634
1635 if (unlikely(cxgb3_rx_credit_thres == 0))
1636 return;
1637
1638 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
1639
1640 /*
1641 * For coalescing to work effectively ensure the receive window has
1642 * at least 16KB left.
1643 */
1644 must_send = credits + 16384 >= cxgb3_rcv_win;
1645
1646 if (must_send || credits >= cxgb3_rx_credit_thres)
1647 c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
1648}
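/*
 * Worked example (cxgb3_rcv_win = 256KB assumed purely for illustration):
 * credits + 16384 >= 256KB once at least 240KB of the window has been
 * consumed, so credits are returned immediately at that point even if
 * still below cxgb3_rx_credit_thres.
 */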
1649
1650/**
1651 * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
1652 * @c3cn: iscsi tcp connection
1653 * @skb: skb contains the iscsi pdu
1654 *
1655 * Add a list of skbs to a connection send queue. The skbs must comply with
1656 * the max size limit of the device and have a headroom of at least
1657 * TX_HEADER_LEN bytes.
1658 * Return # of bytes queued.
1659 */
1660int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
1661{
1662 struct sk_buff *next;
1663 int err, copied = 0;
1664
1665 spin_lock_bh(&c3cn->lock);
1666
1667 if (c3cn->state != C3CN_STATE_ESTABLISHED) {
1668 c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
1669 c3cn, c3cn->state);
1670 err = -EAGAIN;
1671 goto out_err;
1672 }
1673
1674 err = -EPIPE;
1675 if (c3cn->err) {
1676 c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
1677 goto out_err;
1678 }
1679
1680 while (skb) {
1681 int frags = skb_shinfo(skb)->nr_frags +
1682 (skb->len != skb->data_len);
1683
1684 if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
1685 c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
1686 err = -EINVAL;
1687 goto out_err;
1688 }
1689
1690 if (frags >= SKB_WR_LIST_SIZE) {
1691 cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
1692 c3cn, skb_shinfo(skb)->nr_frags,
1693 skb->len, skb->data_len);
1694 err = -EINVAL;
1695 goto out_err;
1696 }
1697
1698 next = skb->next;
1699 skb->next = NULL;
1700 skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
1701 copied += skb->len;
1702 c3cn->write_seq += skb->len + ulp_extra_len(skb);
1703 skb = next;
1704 }
1705done:
1706 if (likely(skb_queue_len(&c3cn->write_queue)))
1707 c3cn_push_tx_frames(c3cn, 1);
1708 spin_unlock_bh(&c3cn->lock);
1709 return copied;
1710
1711out_err:
1712 if (copied == 0 && err == -EPIPE)
1713 copied = c3cn->err ? c3cn->err : -EPIPE;
1714 goto done;
1715}
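/*
 * Caller-side sketch of an skb that meets the constraints above
 * (illustrative only; "room" is a placeholder, and this mirrors what
 * cxgb3i_conn_alloc_pdu() in cxgb3i_pdu.c does):
 *
 *	skb = alloc_skb(TX_HEADER_LEN + room, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, TX_HEADER_LEN);  (reserve WR headroom)
 */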
1716
1717static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
1718{
1719 struct adap_ports *ports = &cdata->ports;
1720 int i;
1721
1722 for (i = 0; i < ports->nports; i++)
1723 NDEV2CDATA(ports->lldevs[i]) = NULL;
1724 cxgb3i_free_big_mem(cdata);
1725}
1726
1727void cxgb3i_sdev_cleanup(void)
1728{
1729	struct cxgb3i_sdev_data *cdata, *tmp;
1730
1731 write_lock(&cdata_rwlock);
1732	list_for_each_entry_safe(cdata, tmp, &cdata_list, list) {
1733 list_del(&cdata->list);
1734 sdev_data_cleanup(cdata);
1735 }
1736 write_unlock(&cdata_rwlock);
1737}
1738
1739int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
1740{
1741 cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
1742 cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
1743 cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
1744 cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
1745 cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
1746 cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
1747 cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
1748 cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
1749
1750 if (cxgb3_max_connect > CXGB3I_MAX_CONN)
1751 cxgb3_max_connect = CXGB3I_MAX_CONN;
1752 return 0;
1753}
1754
1755/**
1756 * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
1757 * @cdev: t3cdev adapter
1758 * @client: cxgb3 driver client
1759 */
1760void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
1761{
1762 struct cxgb3i_sdev_data *cdata;
1763 struct ofld_page_info rx_page_info;
1764 unsigned int wr_len;
1765 int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
1766 8 * sizeof(unsigned long));
1767 int i;
1768
1769 cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
1770 if (!cdata)
1771 return;
1772
1773 if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
1774 cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
1775 cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
1776 goto free_cdata;
1777
1778 s3_init_wr_tab(wr_len);
1779
1780 INIT_LIST_HEAD(&cdata->list);
1781 cdata->cdev = cdev;
1782 cdata->client = client;
1783
1784 for (i = 0; i < cdata->ports.nports; i++)
1785 NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
1786
1787 write_lock(&cdata_rwlock);
1788 list_add_tail(&cdata->list, &cdata_list);
1789 write_unlock(&cdata_rwlock);
1790
1791 return;
1792
1793free_cdata:
1794 cxgb3i_free_big_mem(cdata);
1795}
1796
1797/**
1798 * cxgb3i_sdev_remove - free the allocated resources for the adapter
1799 * @cdev: t3cdev adapter
1800 */
1801void cxgb3i_sdev_remove(struct t3cdev *cdev)
1802{
1803 struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
1804
1805 write_lock(&cdata_rwlock);
1806 list_del(&cdata->list);
1807 write_unlock(&cdata_rwlock);
1808
1809 sdev_data_cleanup(cdata);
1810}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
new file mode 100644
index 000000000000..d23156907ffd
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -0,0 +1,231 @@
1/*
2 * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
3 *
4 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
5 *
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
10 *
11 * Written by: Dimitris Michailidis (dm@chelsio.com)
12 * Karen Xie (kxie@chelsio.com)
13 */
14
15#ifndef _CXGB3I_OFFLOAD_H
16#define _CXGB3I_OFFLOAD_H
17
18#include <linux/skbuff.h>
19#include <net/tcp.h>
20
21#include "common.h"
22#include "adapter.h"
23#include "t3cdev.h"
24#include "cxgb3_offload.h"
25
26#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
27#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
28#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
29#define cxgb3i_log_debug(fmt, args...) \
30 printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
31
32/**
33 * struct s3_conn - an iscsi tcp connection structure
34 *
35 * @dev: net device associated with the connection
36 * @cdev: adapter t3cdev for net device
37 * @flags: see c3cn_flags below
38 * @tid: connection id assigned by the h/w
39 * @qset: queue set used by connection
40 * @mss_idx: Maximum Segment Size table index
41 * @l2t: ARP resolution entry for offload packets
42 * @wr_max: maximum in-flight writes
43 * @wr_avail: number of writes available
44 * @wr_unacked: writes since last request for completion notification
45 * @wr_pending_head: head of pending write queue
46 * @wr_pending_tail: tail of pending write queue
47 * @cpl_close: skb for cpl_close_req
48 * @cpl_abort_req: skb for cpl_abort_req
49 * @cpl_abort_rpl: skb for cpl_abort_rpl
50 * @lock: connection status lock
51 * @refcnt: reference count on connection
52 * @state: connection state
53 * @saddr: source ip/port address
54 * @daddr: destination ip/port address
55 * @dst_cache: reference to destination route
56 * @receive_queue: received PDUs
57 * @write_queue: un-pushed pending writes
58 * @retry_timer: retry timer for various operations
59 * @err: connection error status
60 * @callback_lock: lock for opaque user context
61 * @user_data: opaque user context
62 * @rcv_nxt: next receive seq. #
63 * @copied_seq: head of yet unread data
64 * @rcv_wup: rcv_nxt on last window update sent
65 * @snd_nxt: next sequence we send
66 * @snd_una: first byte we want an ack for
67 * @write_seq: tail+1 of data held in send buffer
68 */
69struct s3_conn {
70 struct net_device *dev;
71 struct t3cdev *cdev;
72 unsigned long flags;
73 int tid;
74 int qset;
75 int mss_idx;
76 struct l2t_entry *l2t;
77 int wr_max;
78 int wr_avail;
79 int wr_unacked;
80 struct sk_buff *wr_pending_head;
81 struct sk_buff *wr_pending_tail;
82 struct sk_buff *cpl_close;
83 struct sk_buff *cpl_abort_req;
84 struct sk_buff *cpl_abort_rpl;
85 spinlock_t lock;
86 atomic_t refcnt;
87 volatile unsigned int state;
88 struct sockaddr_in saddr;
89 struct sockaddr_in daddr;
90 struct dst_entry *dst_cache;
91 struct sk_buff_head receive_queue;
92 struct sk_buff_head write_queue;
93 struct timer_list retry_timer;
94 int err;
95 rwlock_t callback_lock;
96 void *user_data;
97
98 u32 rcv_nxt;
99 u32 copied_seq;
100 u32 rcv_wup;
101 u32 snd_nxt;
102 u32 snd_una;
103 u32 write_seq;
104};
105
106/*
107 * connection state
108 */
109enum conn_states {
110 C3CN_STATE_CONNECTING = 1,
111 C3CN_STATE_ESTABLISHED,
112 C3CN_STATE_ACTIVE_CLOSE,
113 C3CN_STATE_PASSIVE_CLOSE,
114 C3CN_STATE_CLOSE_WAIT_1,
115 C3CN_STATE_CLOSE_WAIT_2,
116 C3CN_STATE_ABORTING,
117 C3CN_STATE_CLOSED,
118};
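/*
 * Observed transitions (from cxgb3i_offload.c): CONNECTING -> ESTABLISHED
 * on CPL_ACT_ESTABLISH; ESTABLISHED -> ACTIVE_CLOSE (local FIN) or
 * PASSIVE_CLOSE (peer FIN); ACTIVE_CLOSE -> CLOSE_WAIT_1 on CLOSE_CON_RPL,
 * then -> CLOSED via CLOSE_WAIT_2 or the peer's close; aborts move any
 * state to ABORTING.
 */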
119
120static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
121{
122 return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
123}
124static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
125{
126 return c3cn->state == C3CN_STATE_ESTABLISHED;
127}
128
129/*
130 * Connection flags -- mostly used to track close-related events.
131 */
132enum c3cn_flags {
133 C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
134 C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
135 C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
136 C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
137 C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
138};
139
140/**
141 * cxgb3i_sdev_data - Per adapter data.
142 * Linked off of each Ethernet device port on the adapter.
143 * Also available via the t3cdev structure since we have pointers to our port
144 * net_devices there.
145 *
146 * @list: list head to link elements
147 * @cdev: t3cdev adapter
148 * @client: CPL client pointer
149 * @ports: array of adapter ports
150 * @sport_map_next: next index into the port map
151 * @sport_map: source port map
152 */
153struct cxgb3i_sdev_data {
154 struct list_head list;
155 struct t3cdev *cdev;
156 struct cxgb3_client *client;
157 struct adap_ports ports;
158 unsigned int sport_map_next;
159 unsigned long sport_map[0];
160};
161#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
162#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
163
164void cxgb3i_sdev_cleanup(void);
165int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
166void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
167void cxgb3i_sdev_remove(struct t3cdev *);
168
169struct s3_conn *cxgb3i_c3cn_create(void);
170int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
171void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
172int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
173void cxgb3i_c3cn_release(struct s3_conn *);
174
175/**
176 * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
177 *
178 * @flags: see C3CB_FLAG_* below
179 * @ulp_mode: ULP mode/submode of sk_buff
180 * @seq: tcp sequence number
181 * @ddigest: pdu data digest
182 * @pdulen: recovered pdu length
183 * @wr_data: scratch area for tx wr
184 */
185struct cxgb3_skb_cb {
186 __u8 flags;
187 __u8 ulp_mode;
188 __u32 seq;
189 __u32 ddigest;
190 __u32 pdulen;
191 struct sk_buff *wr_data;
192};
193
194#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
195
196#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
197#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest)
198#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
199#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data)
200
201enum c3cb_flags {
202 C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
203 C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
204 C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
205};
206
207/**
208 * sge_opaque_hdr -
209 * Opaque version of the structure the SGE stores at skb->head of TX_DATA
210 * packets and for which we must reserve space.
210 * and for which we must reserve space.
211 */
212struct sge_opaque_hdr {
213 void *dev;
214 dma_addr_t addr[MAX_SKB_FRAGS + 1];
215};
216
217/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
218#define TX_HEADER_LEN \
219 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
220
221/*
222 * get and set private ip for iscsi traffic
223 */
224#define cxgb3i_get_private_ipv4addr(ndev) \
225 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
226#define cxgb3i_set_private_ipv4addr(ndev, addr) \
227 (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
228
229/* max. connections per adapter */
230#define CXGB3I_MAX_CONN 16384
231#endif /* _CXGB3I_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
new file mode 100644
index 000000000000..ce7ce8c6094c
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -0,0 +1,402 @@
1/*
2 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 * Copyright (c) 2008 Mike Christie
6 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Karen Xie (kxie@chelsio.com)
13 */
14
15#include <linux/skbuff.h>
16#include <linux/crypto.h>
17#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_host.h>
19
20#include "cxgb3i.h"
21#include "cxgb3i_pdu.h"
22
23#ifdef __DEBUG_CXGB3I_RX__
24#define cxgb3i_rx_debug cxgb3i_log_debug
25#else
26#define cxgb3i_rx_debug(fmt...)
27#endif
28
29#ifdef __DEBUG_CXGB3I_TX__
30#define cxgb3i_tx_debug cxgb3i_log_debug
31#else
32#define cxgb3i_tx_debug(fmt...)
33#endif
34
35static struct page *pad_page;
36
37/*
38 * pdu receive, interact with libiscsi_tcp
39 */
40static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
41 unsigned int offset, int offloaded)
42{
43 int status = 0;
44 int bytes_read;
45
46 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
47 switch (status) {
48 case ISCSI_TCP_CONN_ERR:
49 return -EIO;
50 case ISCSI_TCP_SUSPENDED:
51 /* no transfer - just have caller flush queue */
52 return bytes_read;
53 case ISCSI_TCP_SKB_DONE:
54 /*
55 * PDUs should always fit in the skb and we should get a
56 * segment done notification.
57 */
58 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
59 return -EFAULT;
60 case ISCSI_TCP_SEGMENT_DONE:
61 return bytes_read;
62 default:
63 iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
64 "status %d\n", status);
65 return -EINVAL;
66 }
67}
68
69static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
70 struct sk_buff *skb)
71{
72 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
73 bool offloaded = 0;
74 unsigned int offset;
75 int rc;
76
77 cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
78 conn, skb, skb->len, skb_ulp_mode(skb));
79
80 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
81 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
82 return -EIO;
83 }
84
85 if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
86 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
87 return -EIO;
88 }
89
90 if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
91 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
92 return -EIO;
93 }
94
95 /* iscsi hdr */
96 rc = read_pdu_skb(conn, skb, 0, 0);
97 if (rc <= 0)
98 return rc;
99
100 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
101 return 0;
102
103 offset = rc;
104 if (conn->hdrdgst_en)
105 offset += ISCSI_DIGEST_SIZE;
106
107 /* iscsi data */
108 if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
109 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
110 "itt 0x%x.\n",
111 skb,
112 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
113 tcp_conn->in.datalen,
114 ntohl(tcp_conn->in.hdr->itt));
115 offloaded = 1;
116 } else {
117 cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
118 "itt 0x%x.\n",
119 skb,
120 tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
121 tcp_conn->in.datalen,
122 ntohl(tcp_conn->in.hdr->itt));
123 offset += sizeof(struct cpl_iscsi_hdr_norss);
124 }
125
126 rc = read_pdu_skb(conn, skb, offset, offloaded);
127 if (rc < 0)
128 return rc;
129 else
130 return 0;
131}
132
133/*
134 * pdu transmit, interact with libiscsi_tcp
135 */
136static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
137{
138 u8 submode = 0;
139
140 if (hcrc)
141 submode |= 1;
142 if (dcrc)
143 submode |= 2;
144 skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
145}
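/*
 * Example: with header and data digests both enabled, the ULP byte above
 * becomes (ULP_MODE_ISCSI << 4) | 3 -- the iSCSI ULP mode in the high
 * nibble, the HCRC and DCRC submode bits in the low nibble.
 */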
146
147void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
148{
149 struct iscsi_tcp_task *tcp_task = task->dd_data;
150
151 /* never reached the xmit task callout */
152 if (tcp_task->dd_data)
153 kfree_skb(tcp_task->dd_data);
154 tcp_task->dd_data = NULL;
155
156 /* MNC - Do we need a check in case this is called but
157 * cxgb3i_conn_alloc_pdu has never been called on the task? */
158 cxgb3i_release_itt(task, task->hdr_itt);
159 iscsi_tcp_cleanup_task(task);
160}
161
162/*
163 * We do not support AHSs yet.
164 */
165int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
166{
167 struct iscsi_tcp_task *tcp_task = task->dd_data;
168 struct sk_buff *skb;
169
170 task->hdr = NULL;
171	/* always allocate room for AHS */
172 skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
173 TX_HEADER_LEN, GFP_ATOMIC);
174 if (!skb)
175 return -ENOMEM;
176
177 cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
178 task, opcode, skb);
179
180 tcp_task->dd_data = skb;
181 skb_reserve(skb, TX_HEADER_LEN);
182 task->hdr = (struct iscsi_hdr *)skb->data;
183 task->hdr_max = sizeof(struct iscsi_hdr);
184
185 /* data_out uses scsi_cmd's itt */
186 if (opcode != ISCSI_OP_SCSI_DATA_OUT)
187 cxgb3i_reserve_itt(task, &task->hdr->itt);
188
189 return 0;
190}
191
192int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
193 unsigned int count)
194{
195 struct iscsi_tcp_task *tcp_task = task->dd_data;
196 struct sk_buff *skb = tcp_task->dd_data;
197 struct iscsi_conn *conn = task->conn;
198 struct page *pg;
199 unsigned int datalen = count;
200 int i, padlen = iscsi_padding(count);
201 skb_frag_t *frag;
202
203 cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
204 task, task->sc, offset, count, skb);
205
206 skb_put(skb, task->hdr_len);
207 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
208 if (!count)
209 return 0;
210
211 if (task->sc) {
212 struct scatterlist *sg;
213 struct scsi_data_buffer *sdb;
214 unsigned int sgoffset = offset;
215 struct page *sgpg;
216 unsigned int sglen;
217
218 sdb = scsi_out(task->sc);
219 sg = sdb->table.sgl;
220
221 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
222 cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
223 i, sg_page(sg), sg->length, sg->offset);
224
225 if (sgoffset < sg->length)
226 break;
227 sgoffset -= sg->length;
228 }
229 sgpg = sg_page(sg);
230 sglen = sg->length - sgoffset;
231
232 do {
233 int j = skb_shinfo(skb)->nr_frags;
234 unsigned int copy;
235
236 if (!sglen) {
237 sg = sg_next(sg);
238 sgpg = sg_page(sg);
239 sgoffset = 0;
240 sglen = sg->length;
241 ++i;
242 }
243 copy = min(sglen, datalen);
244 if (j && skb_can_coalesce(skb, j, sgpg,
245 sg->offset + sgoffset)) {
246 skb_shinfo(skb)->frags[j - 1].size += copy;
247 } else {
248 get_page(sgpg);
249 skb_fill_page_desc(skb, j, sgpg,
250 sg->offset + sgoffset, copy);
251 }
252 sgoffset += copy;
253 sglen -= copy;
254 datalen -= copy;
255 } while (datalen);
256 } else {
257 pg = virt_to_page(task->data);
258
259 while (datalen) {
260 i = skb_shinfo(skb)->nr_frags;
261 frag = &skb_shinfo(skb)->frags[i];
262
263 get_page(pg);
264 frag->page = pg;
265 frag->page_offset = 0;
266 frag->size = min((unsigned int)PAGE_SIZE, datalen);
267
268 skb_shinfo(skb)->nr_frags++;
269 datalen -= frag->size;
270 pg++;
271 }
272 }
273
274 if (padlen) {
275 i = skb_shinfo(skb)->nr_frags;
276 frag = &skb_shinfo(skb)->frags[i];
277 frag->page = pad_page;
278 frag->page_offset = 0;
279 frag->size = padlen;
280 skb_shinfo(skb)->nr_frags++;
281 }
282
283 datalen = count + padlen;
284 skb->data_len += datalen;
285 skb->truesize += datalen;
286 skb->len += datalen;
287 return 0;
288}
289
290int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
291{
292 struct iscsi_tcp_task *tcp_task = task->dd_data;
293 struct sk_buff *skb = tcp_task->dd_data;
294 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
295 struct cxgb3i_conn *cconn = tcp_conn->dd_data;
296 unsigned int datalen;
297 int err;
298
299 if (!skb)
300 return 0;
301
302 datalen = skb->data_len;
303 tcp_task->dd_data = NULL;
304 err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
305 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
306 task, skb, skb->len, skb->data_len, err);
307 if (err > 0) {
308 int pdulen = err;
309
310 if (task->conn->hdrdgst_en)
311 pdulen += ISCSI_DIGEST_SIZE;
312 if (datalen && task->conn->datadgst_en)
313 pdulen += ISCSI_DIGEST_SIZE;
314
315 task->conn->txdata_octets += pdulen;
316 return 0;
317 }
318
319 if (err < 0 && err != -EAGAIN) {
320 kfree_skb(skb);
321 cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
322 task->itt, skb, skb->len, skb->data_len, err);
323 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
324 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
325 return err;
326 }
327 /* reset skb to send when we are called again */
328 tcp_task->dd_data = skb;
329 return -EAGAIN;
330}
331
332int cxgb3i_pdu_init(void)
333{
334 pad_page = alloc_page(GFP_KERNEL);
335 if (!pad_page)
336 return -ENOMEM;
337 memset(page_address(pad_page), 0, PAGE_SIZE);
338 return 0;
339}
340
341void cxgb3i_pdu_cleanup(void)
342{
343 if (pad_page) {
344 __free_page(pad_page);
345 pad_page = NULL;
346 }
347}
348
349void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
350{
351 struct sk_buff *skb;
352 unsigned int read = 0;
353 struct iscsi_conn *conn = c3cn->user_data;
354 int err = 0;
355
356 cxgb3i_rx_debug("cn 0x%p.\n", c3cn);
357
358 read_lock(&c3cn->callback_lock);
359 if (unlikely(!conn || conn->suspend_rx)) {
360 cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
361 conn, conn ? conn->id : 0xFF,
362 conn ? conn->suspend_rx : 0xFF);
363 read_unlock(&c3cn->callback_lock);
364 return;
365 }
366 skb = skb_peek(&c3cn->receive_queue);
367 while (!err && skb) {
368 __skb_unlink(skb, &c3cn->receive_queue);
369 read += skb_ulp_pdulen(skb);
370 err = cxgb3i_conn_read_pdu_skb(conn, skb);
371 __kfree_skb(skb);
372 skb = skb_peek(&c3cn->receive_queue);
373 }
374 read_unlock(&c3cn->callback_lock);
375 if (c3cn) {
376 c3cn->copied_seq += read;
377 cxgb3i_c3cn_rx_credits(c3cn, read);
378 }
379 conn->rxdata_octets += read;
380}
381
382void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
383{
384 struct iscsi_conn *conn = c3cn->user_data;
385
386 cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
387 if (conn) {
388 cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
389 scsi_queue_work(conn->session->host, &conn->xmitwork);
390 }
391}
392
393void cxgb3i_conn_closing(struct s3_conn *c3cn)
394{
395 struct iscsi_conn *conn;
396
397 read_lock(&c3cn->callback_lock);
398 conn = c3cn->user_data;
399 if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
400 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
401 read_unlock(&c3cn->callback_lock);
402}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
new file mode 100644
index 000000000000..a3f685cc2362
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -0,0 +1,59 @@
1/*
2 * cxgb3i_pdu.h: Chelsio S3xx iSCSI driver.
3 *
4 * Copyright (c) 2008 Chelsio Communications, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Karen Xie (kxie@chelsio.com)
11 */
12
13#ifndef __CXGB3I_ULP2_PDU_H__
14#define __CXGB3I_ULP2_PDU_H__
15
16struct cpl_iscsi_hdr_norss {
17 union opcode_tid ot;
18 u16 pdu_len_ddp;
19 u16 len;
20 u32 seq;
21 u16 urg;
22 u8 rsvd;
23 u8 status;
24};
25
26struct cpl_rx_data_ddp_norss {
27 union opcode_tid ot;
28 u16 urg;
29 u16 len;
30 u32 seq;
31 u32 nxt_seq;
32 u32 ulp_crc;
33 u32 ddp_status;
34};
35
36#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
37#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
38#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
39#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
40#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
41#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
42#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
43#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
44#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
45#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
46#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
47#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
48#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
49
50#define ULP2_FLAG_DATA_READY 0x1
51#define ULP2_FLAG_DATA_DDPED 0x2
52#define ULP2_FLAG_HCRC_ERROR 0x10
53#define ULP2_FLAG_DCRC_ERROR 0x20
54#define ULP2_FLAG_PAD_ERROR 0x40
55
56void cxgb3i_conn_closing(struct s3_conn *);
57void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
58void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
59#endif
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3d50cabca7ee..53664765570a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -24,6 +24,7 @@
 #include <scsi/scsi_dh.h>
 
 #define RDAC_NAME "rdac"
+#define RDAC_RETRY_COUNT 5
 
 /*
  * LSI mode page stuff
@@ -386,6 +387,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
 	struct c9_inquiry *inqp;
 
 	h->lun_state = RDAC_LUN_UNOWNED;
+	h->state = RDAC_STATE_ACTIVE;
 	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
 	if (err == SCSI_DH_OK) {
 		inqp = &h->inq.c9;
@@ -477,21 +479,27 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
 {
 	struct request *rq;
 	struct request_queue *q = sdev->request_queue;
-	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+	int err, retry_cnt = RDAC_RETRY_COUNT;
 
+retry:
+	err = SCSI_DH_RES_TEMP_UNAVAIL;
 	rq = rdac_failover_get(sdev, h);
 	if (!rq)
 		goto done;
 
-	sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+	sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
+		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
 
 	err = blk_execute_rq(q, NULL, rq, 1);
-	if (err != SCSI_DH_OK)
+	blk_put_request(rq);
+	if (err != SCSI_DH_OK) {
 		err = mode_select_handle_sense(sdev, h->sense);
+		if (err == SCSI_DH_RETRY && retry_cnt--)
+			goto retry;
+	}
 	if (err == SCSI_DH_OK)
 		h->state = RDAC_STATE_ACTIVE;
 
-	blk_put_request(rq);
 done:
 	return err;
 }
@@ -594,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
594 {"SUN", "LCSM100_F"}, 602 {"SUN", "LCSM100_F"},
595 {"DELL", "MD3000"}, 603 {"DELL", "MD3000"},
596 {"DELL", "MD3000i"}, 604 {"DELL", "MD3000i"},
605 {"LSI", "INF-01-00"},
606 {"ENGENIO", "INF-01-00"},
597 {NULL, NULL}, 607 {NULL, NULL},
598}; 608};
599 609
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index a73a6bbb1b2b..976cdd5c94ef 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1626,8 +1626,15 @@ static void map_dma(unsigned int i, struct hostdata *ha)
 
 	cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
 
-	count = scsi_dma_map(SCpnt);
-	BUG_ON(count < 0);
+	if (!scsi_sg_count(SCpnt)) {
+		cpp->data_len = 0;
+		return;
+	}
+
+	count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+			   pci_dir);
+	BUG_ON(!count);
+
 	scsi_for_each_sg(SCpnt, sg, count, k) {
 		cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
 		cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
@@ -1655,7 +1662,9 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
 	pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
 			 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
 
-	scsi_dma_unmap(SCpnt);
+	if (scsi_sg_count(SCpnt))
+		pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+			     pci_dir);
 
 	if (!DEV2H(cpp->data_len))
 		pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 952505c006df..152dd15db276 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -14,8 +14,8 @@
  *               neuffer@goofy.zdv.uni-mainz.de               *
  *               a.arnold@kfa-juelich.de                      *
  *                                                            *
- *  Updated 2002 by Alan Cox <alan@redhat.com> for Linux      *
- *  2.5.x and the newer locking and error handling            *
+ *  Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for   *
+ *  Linux 2.5.x and the newer locking and error handling      *
  *                                                            *
  *  This program is free software; you can redistribute it    *
  *  and/or modify it under the terms of the GNU General       *
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 62a4618530d0..a680e18b5f3b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1453,7 +1453,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
 		offset = 0;
 
 	if (offset) {
-		int rounded_up, one_clock;
+		int one_clock;
 
 		if (period > esp->max_period) {
 			period = offset = 0;
@@ -1463,9 +1463,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
 			goto do_reject;
 
 		one_clock = esp->ccycle / 1000;
-		rounded_up = (period << 2);
-		rounded_up = (rounded_up + one_clock - 1) / one_clock;
-		stp = rounded_up;
+		stp = DIV_ROUND_UP(period << 2, one_clock);
 		if (stp && esp->rev >= FAS236) {
 			if (stp >= 50)
 				stp--;
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 000000000000..b78da06d7c0e
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,8 @@
1# $Id: Makefile
2
3obj-$(CONFIG_FCOE) += fcoe.o
4
5fcoe-y := \
6 libfcoe.o \
7 fcoe_sw.o \
8 fc_transport_fcoe.o
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
new file mode 100644
index 000000000000..bf7fe6fc0820
--- /dev/null
+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
@@ -0,0 +1,446 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/pci.h>
21#include <scsi/libfcoe.h>
22#include <scsi/fc_transport_fcoe.h>
23
24/* internal fcoe transport */
25struct fcoe_transport_internal {
26 struct fcoe_transport *t;
27 struct net_device *netdev;
28 struct list_head list;
29};
30
31/* fcoe transports list and its lock */
32static LIST_HEAD(fcoe_transports);
33static DEFINE_MUTEX(fcoe_transports_lock);
34
35/**
36 * fcoe_transport_default - returns ptr to the default transport fcoe_sw
37 **/
38struct fcoe_transport *fcoe_transport_default(void)
39{
40 return &fcoe_sw_transport;
41}
42
43/**
44 * fcoe_transport_pcidev - get the pci dev from a netdev
45 * @netdev: the netdev that the pci dev will be retrieved from
46 *
47 * Returns: NULL or the corresponding pci_dev
48 **/
49struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
50{
51 if (!netdev->dev.parent)
52 return NULL;
53 return to_pci_dev(netdev->dev.parent);
54}
55
56/**
57 * fcoe_transport_device_lookup - check if a netdev is managed by the
58 * given transport
59 * @t: the transport to search
60 * @netdev: the netdev to look up
61 *
62 * Walks the transport's device list looking for an entry that matches
63 * the given netdev.
64 *
65 * Returns: the matching fcoe_transport_internal, or NULL if not found
66 **/
67static struct fcoe_transport_internal *fcoe_transport_device_lookup(
68 struct fcoe_transport *t, struct net_device *netdev)
69{
70 struct fcoe_transport_internal *ti;
71
72 /* search the transport's device list for this device */
73 mutex_lock(&t->devlock);
74 list_for_each_entry(ti, &t->devlist, list) {
75 if (ti->netdev == netdev) {
76 mutex_unlock(&t->devlock);
77 return ti;
78 }
79 }
80 mutex_unlock(&t->devlock);
81 return NULL;
82}
83/**
84 * fcoe_transport_device_add - assign a transport to a device
85 * @t: the transport the device is assigned to
86 * @netdev: the netdev the transport is to be attached to
87 *
88 * Adds the netdev to the transport's device list, so that the transport
89 * manages the device from now on.
90 * Returns: 0 for success
91 **/
92static int fcoe_transport_device_add(struct fcoe_transport *t,
93 struct net_device *netdev)
94{
95 struct fcoe_transport_internal *ti;
96
97 ti = fcoe_transport_device_lookup(t, netdev);
98 if (ti) {
99 printk(KERN_DEBUG "fcoe_transport_device_add:"
100 "device %s is already added to transport %s\n",
101 netdev->name, t->name);
102 return -EEXIST;
103 }
104 /* allocate an internal struct to host the netdev and the list */
105 ti = kzalloc(sizeof(*ti), GFP_KERNEL);
106 if (!ti)
107 return -ENOMEM;
108
109 ti->t = t;
110 ti->netdev = netdev;
111 INIT_LIST_HEAD(&ti->list);
112 dev_hold(ti->netdev);
113
114 mutex_lock(&t->devlock);
115 list_add(&ti->list, &t->devlist);
116 mutex_unlock(&t->devlock);
117
118 printk(KERN_DEBUG "fcoe_transport_device_add:"
119 "device %s added to transport %s\n",
120 netdev->name, t->name);
121
122 return 0;
123}
124
125/**
126 * fcoe_transport_device_remove - remove a device from its transport
127 * @t: the transport the device is removed from
128 * @netdev: the netdev to be detached from the transport
129 * This removes the device from the transport so the given transport
130 * will not manage this device any more
131 *
132 * Returns: 0 for success
133 **/
134static int fcoe_transport_device_remove(struct fcoe_transport *t,
135 struct net_device *netdev)
136{
137 struct fcoe_transport_internal *ti;
138
139 ti = fcoe_transport_device_lookup(t, netdev);
140 if (!ti) {
141 printk(KERN_DEBUG "fcoe_transport_device_remove:"
142 "device %s is not managed by transport %s\n",
143 netdev->name, t->name);
144 return -ENODEV;
145 }
146 mutex_lock(&t->devlock);
147 list_del(&ti->list);
148 mutex_unlock(&t->devlock);
149 printk(KERN_DEBUG "fcoe_transport_device_remove:"
150 "device %s removed from transport %s\n",
151 netdev->name, t->name);
152 dev_put(ti->netdev);
153 kfree(ti);
154 return 0;
155}
156
157/**
158 * fcoe_transport_device_remove_all - remove all from transport devlist
159 *
160 * This removes all devices from the transport's device list; the given
161 * transport will not manage any of them any more
162 *
163 * Returns: none
164 **/
165static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
166{
167 struct fcoe_transport_internal *ti, *tmp;
168
169 mutex_lock(&t->devlock);
170 list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
171 list_del(&ti->list);
172 dev_put(ti->netdev); kfree(ti); /* drop the ref taken in device_add */
173 }
174 mutex_unlock(&t->devlock);
175}
176
177/**
178 * fcoe_transport_match - use the bus device match function to match the hw
179 * @t: the fcoe transport
180 * @netdev: the netdev to match against
181 *
182 * This function is used to check if the given transport wants to manage the
183 * input netdev. If the transport implements the match function, it will be
184 * called; otherwise we just compare the pci vendor and device id.
185 *
186 * Returns: true for match up
187 **/
188static bool fcoe_transport_match(struct fcoe_transport *t,
189 struct net_device *netdev)
190{
191 /* match transport by vendor and device id */
192 struct pci_dev *pci;
193
194 pci = fcoe_transport_pcidev(netdev);
195
196 if (pci) {
197 printk(KERN_DEBUG "fcoe_transport_match:"
198 "%s:%x:%x -- %s:%x:%x\n",
199 t->name, t->vendor, t->device,
200 netdev->name, pci->vendor, pci->device);
201
202 /* if transport supports match */
203 if (t->match)
204 return t->match(netdev);
205
206 /* else just compare the vendor and device id: pci only */
207 return (t->vendor == pci->vendor) && (t->device == pci->device);
208 }
209 return false;
210}
211
212/**
213 * fcoe_transport_lookup - check if the transport is already registered
214 * @t: the transport to be looked up
215 *
216 * This compares the parent device (pci) vendor and device id
217 *
218 * Returns: the matching transport, or the default sw transport
219 * (fcoe_sw) if no registered transport matches
220 *
221 **/
222static struct fcoe_transport *fcoe_transport_lookup(
223 struct net_device *netdev)
224{
225 struct fcoe_transport *t;
226
227 mutex_lock(&fcoe_transports_lock);
228 list_for_each_entry(t, &fcoe_transports, list) {
229 if (fcoe_transport_match(t, netdev)) {
230 mutex_unlock(&fcoe_transports_lock);
231 return t;
232 }
233 }
234 mutex_unlock(&fcoe_transports_lock);
235
236 printk(KERN_DEBUG "fcoe_transport_lookup:"
237 "use default transport for %s\n", netdev->name);
238 return fcoe_transport_default();
239}
240
241/**
242 * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
243 * @t: ptr to the fcoe transport to be added
244 *
245 * Returns: 0 for success
246 **/
247int fcoe_transport_register(struct fcoe_transport *t)
248{
249 struct fcoe_transport *tt;
250
251 /* TODO - add fcoe_transport specific initialization here */
252 mutex_lock(&fcoe_transports_lock);
253 list_for_each_entry(tt, &fcoe_transports, list) {
254 if (tt == t) {
255 mutex_unlock(&fcoe_transports_lock);
256 return -EEXIST;
257 }
258 }
259 list_add_tail(&t->list, &fcoe_transports);
260 mutex_unlock(&fcoe_transports_lock);
261
262 mutex_init(&t->devlock);
263 INIT_LIST_HEAD(&t->devlist);
264
265 printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
266
267 return 0;
268}
269EXPORT_SYMBOL_GPL(fcoe_transport_register);
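
A minimal sketch (not part of this patch) of how a vendor offload driver
might hook in through this interface; the example_* names and PCI IDs
below are hypothetical:

	static int example_create(struct net_device *netdev);
	static int example_destroy(struct net_device *netdev);

	/* hypothetical transport bound to one PCI vendor/device pair */
	static struct fcoe_transport example_transport = {
		.name    = "example_fcoe",
		.create  = example_create,
		.destroy = example_destroy,
		.vendor  = 0x1234,	/* hypothetical PCI vendor id */
		.device  = 0x5678,	/* hypothetical PCI device id */
	};

	static int __init example_init(void)
	{
		/* adds the transport to the fcoe_transports list above */
		return fcoe_transport_register(&example_transport);
	}

With no .match callback, fcoe_transport_match() would fall back to
comparing these vendor/device ids against the netdev's parent PCI device.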
270
271/**
272 * fcoe_transport_unregister - removes the transport from the fcoe transports list
273 * @t: ptr to the fcoe transport to be removed
274 *
275 * Returns: 0 for success
276 **/
277int fcoe_transport_unregister(struct fcoe_transport *t)
278{
279 struct fcoe_transport *tt, *tmp;
280
281 mutex_lock(&fcoe_transports_lock);
282 list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
283 if (tt == t) {
284 list_del(&t->list);
285 mutex_unlock(&fcoe_transports_lock);
286 fcoe_transport_device_remove_all(t);
287 printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
288 t->name);
289 return 0;
290 }
291 }
292 mutex_unlock(&fcoe_transports_lock);
293 return -ENODEV;
294}
295EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
296
297/*
298 * fcoe_load_transport_driver - load an offload driver by alias name
299 * @netdev: the target net device
300 *
301 * Requests an offload driver module as the fcoe transport; if that fails,
302 * the caller falls back to the SW HBA (fcoe_sw) as its transport
303 *
304 * TODO -
305 * 1. supports only PCI devices
306 * 2. needs fix for VLAN and bonding
307 * 3. a pure hw fcoe hba may not have a netdev
308 *
309 * Returns: 0 for success
310 **/
311int fcoe_load_transport_driver(struct net_device *netdev)
312{
313 struct pci_dev *pci;
314 struct device *dev = netdev->dev.parent;
315
316 if (fcoe_transport_lookup(netdev)) {
317 /* load default transport */
318 printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
319 netdev->name);
320 return -EEXIST;
321 }
322
323 if (dev->bus != &pci_bus_type) {
324 printk(KERN_DEBUG "fcoe: only PCI devices are supported\n");
325 return -ENODEV;
326 }
327 pci = to_pci_dev(dev);
328 printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
329 pci->vendor, pci->device);
330
331 return request_module("fcoe-pci-0x%04x-0x%04x",
332 pci->vendor, pci->device);
333
334}
335EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
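
For the request_module() call above to resolve, an offload driver module
has to carry an alias in the same fcoe-pci-0xVVVV-0xDDDD format; a
hypothetical driver for PCI IDs 0x1234/0x5678 would declare:

	/* hypothetical; matches request_module("fcoe-pci-0x%04x-0x%04x", ...) */
	MODULE_ALIAS("fcoe-pci-0x1234-0x5678");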
336
337/**
338 * fcoe_transport_attach - load transport to fcoe
339 * @netdev: the netdev the transport to be attached to
340 *
341 * This will look for existing offload driver, if not found, it falls back to
342 * the default sw hba (fcoe_sw) as its fcoe transport.
343 *
344 * Returns: 0 for success
345 **/
346int fcoe_transport_attach(struct net_device *netdev)
347{
348 struct fcoe_transport *t;
349
350 /* find the corresponding transport */
351 t = fcoe_transport_lookup(netdev);
352 if (!t) {
353 printk(KERN_DEBUG "fcoe_transport_attach"
354 ":no transport for %s:use %s\n",
355 netdev->name, t->name);
356 return -ENODEV;
357 }
358 /* add to the transport */
359 if (fcoe_transport_device_add(t, netdev)) {
360 printk(KERN_DEBUG "fcoe_transport_attach"
361 ":failed to add %s to tramsport %s\n",
362 netdev->name, t->name);
363 return -EIO;
364 }
365 /* transport create function */
366 if (t->create)
367 t->create(netdev);
368
369 printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
370 t->name, netdev->name);
371 return 0;
372}
373EXPORT_SYMBOL_GPL(fcoe_transport_attach);
374
375/**
376 * fcoe_transport_release - unload transport from fcoe
377 * @netdev: the net device on which fcoe is to be released
378 *
379 * Returns: 0 for success
380 **/
381int fcoe_transport_release(struct net_device *netdev)
382{
383 struct fcoe_transport *t;
384
385 /* find the corresponding transport */
386 t = fcoe_transport_lookup(netdev);
387 if (!t) {
388 printk(KERN_DEBUG "fcoe_transport_release:"
389 "no transport for %s:use %s\n",
390 netdev->name, t->name);
391 return -ENODEV;
392 }
393 /* remove the device from the transport */
394 if (fcoe_transport_device_remove(t, netdev)) {
395 printk(KERN_DEBUG "fcoe_transport_release:"
396 "failed to add %s to tramsport %s\n",
397 netdev->name, t->name);
398 return -EIO;
399 }
400 /* transport destroy function */
401 if (t->destroy)
402 t->destroy(netdev);
403
404 printk(KERN_DEBUG "fcoe_transport_release:"
405 "device %s dettached from transport %s\n",
406 netdev->name, t->name);
407
408 return 0;
409}
410EXPORT_SYMBOL_GPL(fcoe_transport_release);
411
412/**
413 * fcoe_transport_init - initializes fcoe transport layer
414 *
415 * This prepares the fcoe transport layer
416 *
417 * Returns: 0 for success
418 **/
419int __init fcoe_transport_init(void)
420{
421 INIT_LIST_HEAD(&fcoe_transports);
422 mutex_init(&fcoe_transports_lock);
423 return 0;
424}
425
426/**
427 * fcoe_transport_exit - cleans up the fcoe transport layer
428 * This cleans up the fcoe transport layer, removing every transport on the
429 * list; note that the transport destroy func is not called here.
430 *
431 * Returns: 0 for success
432 **/
433int __exit fcoe_transport_exit(void)
434{
435 struct fcoe_transport *t, *tmp;
436
437 mutex_lock(&fcoe_transports_lock);
438 list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
439 list_del(&t->list);
440 mutex_unlock(&fcoe_transports_lock);
441 fcoe_transport_device_remove_all(t);
442 mutex_lock(&fcoe_transports_lock);
443 }
444 mutex_unlock(&fcoe_transports_lock);
445 return 0;
446}
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
new file mode 100644
index 000000000000..dc4cd5e25760
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/if_vlan.h>
29#include <net/rtnetlink.h>
30
31#include <scsi/fc/fc_els.h>
32#include <scsi/fc/fc_encaps.h>
33#include <scsi/fc/fc_fs.h>
34#include <scsi/scsi_transport.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include <scsi/libfc.h>
38#include <scsi/libfcoe.h>
39#include <scsi/fc_transport_fcoe.h>
40
41#define FCOE_SW_VERSION "0.1"
42#define FCOE_SW_NAME "fcoesw"
43#define FCOE_SW_VENDOR "Open-FCoE.org"
44
45#define FCOE_MAX_LUN 255
46#define FCOE_MAX_FCP_TARGET 256
47
48#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
49
50#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
51#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
52
53static struct scsi_transport_template *scsi_transport_fcoe_sw;
54
55struct fc_function_template fcoe_sw_transport_function = {
56 .show_host_node_name = 1,
57 .show_host_port_name = 1,
58 .show_host_supported_classes = 1,
59 .show_host_supported_fc4s = 1,
60 .show_host_active_fc4s = 1,
61 .show_host_maxframe_size = 1,
62
63 .show_host_port_id = 1,
64 .show_host_supported_speeds = 1,
65 .get_host_speed = fc_get_host_speed,
66 .show_host_speed = 1,
67 .show_host_port_type = 1,
68 .get_host_port_state = fc_get_host_port_state,
69 .show_host_port_state = 1,
70 .show_host_symbolic_name = 1,
71
72 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
73 .show_rport_maxframe_size = 1,
74 .show_rport_supported_classes = 1,
75
76 .show_host_fabric_name = 1,
77 .show_starget_node_name = 1,
78 .show_starget_port_name = 1,
79 .show_starget_port_id = 1,
80 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
81 .show_rport_dev_loss_tmo = 1,
82 .get_fc_host_stats = fc_get_host_stats,
83 .issue_fc_host_lip = fcoe_reset,
84
85 .terminate_rport_io = fc_rport_terminate_io,
86};
87
88static struct scsi_host_template fcoe_sw_shost_template = {
89 .module = THIS_MODULE,
90 .name = "FCoE Driver",
91 .proc_name = FCOE_SW_NAME,
92 .queuecommand = fc_queuecommand,
93 .eh_abort_handler = fc_eh_abort,
94 .eh_device_reset_handler = fc_eh_device_reset,
95 .eh_host_reset_handler = fc_eh_host_reset,
96 .slave_alloc = fc_slave_alloc,
97 .change_queue_depth = fc_change_queue_depth,
98 .change_queue_type = fc_change_queue_type,
99 .this_id = -1,
100 .cmd_per_lun = 32,
101 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
102 .use_clustering = ENABLE_CLUSTERING,
103 .sg_tablesize = SG_ALL,
104 .max_sectors = 0xffff,
105};
106
107/*
108 * fcoe_sw_lport_config - sets up the fc_lport
109 * @lp: ptr to the fc_lport
110 *
111 *
112 * Returns: 0 for success
113 *
114 */
115static int fcoe_sw_lport_config(struct fc_lport *lp)
116{
117 int i = 0;
118
119 lp->link_status = 0;
120 lp->max_retry_count = 3;
121 lp->e_d_tov = 2 * 1000; /* FC-FS default */
122 lp->r_a_tov = 2 * 2 * 1000;
123 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
124 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
125
126 /*
127 * allocate per cpu stats block
128 */
129 for_each_online_cpu(i)
130 lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
131 GFP_KERNEL);
132
133 /* lport fc_lport related configuration */
134 fc_lport_config(lp);
135
136 return 0;
137}
138
139/*
140 * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
141 * related properties
142 * @lp : ptr to the fc_lport
143 * @netdev : ptr to the associated netdevice struct
144 *
145 * Must be called after fcoe_sw_lport_config() as it will use lport mutex
146 *
147 * Returns : 0 for success
148 *
149 */
150static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
151{
152 u32 mfs;
153 u64 wwnn, wwpn;
154 struct fcoe_softc *fc;
155 u8 flogi_maddr[ETH_ALEN];
156
157 /* Setup lport private data to point to fcoe softc */
158 fc = lport_priv(lp);
159 fc->lp = lp;
160 fc->real_dev = netdev;
161 fc->phys_dev = netdev;
162
163 /* Require support for get_pauseparam ethtool op. */
164 if (netdev->priv_flags & IFF_802_1Q_VLAN)
165 fc->phys_dev = vlan_dev_real_dev(netdev);
166
167 /* bonding devices are not supported */
168 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
169 (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
170 (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
171 return -EOPNOTSUPP;
172 }
173
174 /*
175 * Determine max frame size based on underlying device and optional
176 * user-configured limit. If the MFS is too low, fcoe_link_ok()
177 * will return 0, so do this first.
178 */
179 mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
180 sizeof(struct fcoe_crc_eof));
181 if (fc_set_mfs(lp, mfs))
182 return -EINVAL;
183
184 lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
185 if (!fcoe_link_ok(lp))
186 lp->link_status |= FC_LINK_UP;
187
188 /* offload features support */
189 if (fc->real_dev->features & NETIF_F_SG)
190 lp->sg_supp = 1;
191
192
193 skb_queue_head_init(&fc->fcoe_pending_queue);
194
195 /* setup Source Mac Address */
196 memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
197 fc->real_dev->addr_len);
198
199 wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
200 fc_set_wwnn(lp, wwnn);
201 /* XXX - 3rd arg needs to be vlan id */
202 wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
203 fc_set_wwpn(lp, wwpn);
204
205 /*
206 * Add FCoE MAC address as second unicast MAC address
207 * or enter promiscuous mode if not capable of listening
208 * for multiple unicast MACs.
209 */
210 rtnl_lock();
211 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
212 dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
213 rtnl_unlock();
214
215 /*
216 * setup the receive function from ethernet driver
217 * on the ethertype for the given device
218 */
219 fc->fcoe_packet_type.func = fcoe_rcv;
220 fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
221 fc->fcoe_packet_type.dev = fc->real_dev;
222 dev_add_pack(&fc->fcoe_packet_type);
223
224 return 0;
225}
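
As a worked example of the mfs computation above, assuming the FCoE
encapsulation sizes (a 14-byte struct fcoe_hdr and an 8-byte struct
fcoe_crc_eof) and a hypothetical 2500-byte MTU:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mtu = 2500;		/* hypothetical FCoE "baby jumbo" MTU */
		unsigned int mfs = mtu - (14 + 8);	/* fcoe_hdr + fcoe_crc_eof */

		/* leaves 2478 bytes for the encapsulated FC frame */
		printf("mtu %u -> max FC frame size %u\n", mtu, mfs);
		return 0;
	}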
226
227/*
228 * fcoe_sw_shost_config - sets up fc_lport->host
229 * @lp : ptr to the fc_lport
230 * @shost : ptr to the associated scsi host
231 * @dev : device associated to scsi host
232 *
233 * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
234 *
235 * Returns : 0 for success
236 *
237 */
238static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
239 struct device *dev)
240{
241 int rc = 0;
242
243 /* lport scsi host config */
244 lp->host = shost;
245
246 lp->host->max_lun = FCOE_MAX_LUN;
247 lp->host->max_id = FCOE_MAX_FCP_TARGET;
248 lp->host->max_channel = 0;
249 lp->host->transportt = scsi_transport_fcoe_sw;
250
251 /* add the new host to the SCSI-ml */
252 rc = scsi_add_host(lp->host, dev);
253 if (rc) {
254 FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
255 return rc;
256 }
257 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
258 FCOE_SW_NAME, FCOE_SW_VERSION,
259 fcoe_netdev(lp)->name);
260
261 return 0;
262}
263
264/*
265 * fcoe_sw_em_config - allocates em for this lport
266 * @lp: the port that em is to allocated for
267 *
268 * Returns : 0 on success
269 */
270static inline int fcoe_sw_em_config(struct fc_lport *lp)
271{
272 BUG_ON(lp->emp);
273
274 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
275 FCOE_MIN_XID, FCOE_MAX_XID);
276 if (!lp->emp)
277 return -ENOMEM;
278
279 return 0;
280}
281
282/*
283 * fcoe_sw_destroy - FCoE software HBA tear-down function
284 * @netdev: ptr to the associated net_device
285 *
286 * Returns: 0 for success
287 */
288static int fcoe_sw_destroy(struct net_device *netdev)
289{
290 int cpu;
291 struct fc_lport *lp = NULL;
292 struct fcoe_softc *fc;
293 u8 flogi_maddr[ETH_ALEN];
294
295 BUG_ON(!netdev);
296
297 printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
298 netdev->name);
299
300 lp = fcoe_hostlist_lookup(netdev);
301 if (!lp)
302 return -ENODEV;
303
304 fc = fcoe_softc(lp);
305
306 /* Logout of the fabric */
307 fc_fabric_logoff(lp);
308
309 /* Remove the instance from fcoe's list */
310 fcoe_hostlist_remove(lp);
311
312 /* Don't listen for Ethernet packets anymore */
313 dev_remove_pack(&fc->fcoe_packet_type);
314
315 /* Cleanup the fc_lport */
316 fc_lport_destroy(lp);
317 fc_fcp_destroy(lp);
318
319 /* Detach from the scsi-ml */
320 fc_remove_host(lp->host);
321 scsi_remove_host(lp->host);
322
323 /* There are no more rports or I/O, free the EM */
324 if (lp->emp)
325 fc_exch_mgr_free(lp->emp);
326
327 /* Delete secondary MAC addresses */
328 rtnl_lock();
329 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
330 dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
331 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
332 dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
333 rtnl_unlock();
334
335 /* Remove skbs for this lport from the per-CPU receive queues */
336 fcoe_percpu_clean(lp);
337
338 /* Free existing skbs */
339 fcoe_clean_pending_queue(lp);
340
341 /* Free memory used by statistical counters */
342 for_each_online_cpu(cpu)
343 kfree(lp->dev_stats[cpu]);
344
345 /* Release the net_device and Scsi_Host */
346 dev_put(fc->real_dev);
347 scsi_host_put(lp->host);
348
349 return 0;
350}
351
352static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
353 .frame_send = fcoe_xmit,
354};
355
356/*
357 * fcoe_sw_create - this function creates the fcoe interface
358 * @netdev: pointer the associated netdevice
359 *
360 * Creates fc_lport struct and scsi_host for lport, configures lport
361 * and starts fabric login.
362 *
363 * Returns : 0 on success
364 */
365static int fcoe_sw_create(struct net_device *netdev)
366{
367 int rc;
368 struct fc_lport *lp = NULL;
369 struct fcoe_softc *fc;
370 struct Scsi_Host *shost;
371
372 BUG_ON(!netdev);
373
374 printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
375 netdev->name);
376
377 lp = fcoe_hostlist_lookup(netdev);
378 if (lp)
379 return -EEXIST;
380
381 shost = fcoe_host_alloc(&fcoe_sw_shost_template,
382 sizeof(struct fcoe_softc));
383 if (!shost) {
384 FC_DBG("Could not allocate host structure\n");
385 return -ENOMEM;
386 }
387 lp = shost_priv(shost);
388 fc = lport_priv(lp);
389
390 /* configure fc_lport, e.g., em */
391 rc = fcoe_sw_lport_config(lp);
392 if (rc) {
393 FC_DBG("Could not configure lport\n");
394 goto out_host_put;
395 }
396
397 /* configure lport network properties */
398 rc = fcoe_sw_netdev_config(lp, netdev);
399 if (rc) {
400 FC_DBG("Could not configure netdev for lport\n");
401 goto out_host_put;
402 }
403
404 /* configure lport scsi host properties */
405 rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
406 if (rc) {
407 FC_DBG("Could not configure shost for lport\n");
408 goto out_host_put;
409 }
410
411 /* lport exch manager allocation */
412 rc = fcoe_sw_em_config(lp);
413 if (rc) {
414 FC_DBG("Could not configure em for lport\n");
415 goto out_host_put;
416 }
417
418 /* Initialize the library */
419 rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
420 if (rc) {
421 FC_DBG("Could not configure libfc for lport!\n");
422 goto out_lp_destroy;
423 }
424
425 /* add to lports list */
426 fcoe_hostlist_add(lp);
427
428 lp->boot_time = jiffies;
429
430 fc_fabric_login(lp);
431
432 dev_hold(netdev);
433
434 return rc;
435
436out_lp_destroy:
437 fc_exch_mgr_free(lp->emp); /* Free the EM */
438out_host_put:
439 scsi_host_put(lp->host);
440 return rc;
441}
442
443/*
444 * fcoe_sw_match - the fcoe sw transport match function
445 *
446 * Returns : false always
447 */
448static bool fcoe_sw_match(struct net_device *netdev)
449{
450 /* FIXME - for sw transport, always return false */
451 return false;
452}
453
454/* the sw hba fcoe transport */
455struct fcoe_transport fcoe_sw_transport = {
456 .name = "fcoesw",
457 .create = fcoe_sw_create,
458 .destroy = fcoe_sw_destroy,
459 .match = fcoe_sw_match,
460 .vendor = 0x0,
461 .device = 0xffff,
462};
463
464/*
465 * fcoe_sw_init - registers fcoe_sw_transport
466 *
467 * Returns : 0 on success
468 */
469int __init fcoe_sw_init(void)
470{
471 /* attach to scsi transport */
472 scsi_transport_fcoe_sw =
473 fc_attach_transport(&fcoe_sw_transport_function);
474 if (!scsi_transport_fcoe_sw) {
475 printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
476 return -ENODEV;
477 }
478 /* register sw transport */
479 fcoe_transport_register(&fcoe_sw_transport);
480 return 0;
481}
482
483/*
484 * fcoe_sw_exit - unregisters fcoe_sw_transport
485 *
486 * Returns : 0 on success
487 */
488int __exit fcoe_sw_exit(void)
489{
490 /* detach the transport */
491 fc_release_transport(scsi_transport_fcoe_sw);
492 fcoe_transport_unregister(&fcoe_sw_transport);
493 return 0;
494}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
new file mode 100644
index 000000000000..e419f486cdb3
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -0,0 +1,1510 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/spinlock.h>
24#include <linux/skbuff.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/if_ether.h>
29#include <linux/if_vlan.h>
30#include <linux/kthread.h>
31#include <linux/crc32.h>
32#include <linux/cpu.h>
33#include <linux/fs.h>
34#include <linux/sysfs.h>
35#include <linux/ctype.h>
36#include <scsi/scsi_tcq.h>
37#include <scsi/scsicam.h>
38#include <scsi/scsi_transport.h>
39#include <scsi/scsi_transport_fc.h>
40#include <net/rtnetlink.h>
41
42#include <scsi/fc/fc_encaps.h>
43
44#include <scsi/libfc.h>
45#include <scsi/fc_frame.h>
46#include <scsi/libfcoe.h>
47#include <scsi/fc_transport_fcoe.h>
48
49static int debug_fcoe;
50
51#define FCOE_MAX_QUEUE_DEPTH 256
52
53/* destination address mode */
54#define FCOE_GW_ADDR_MODE 0x00
55#define FCOE_FCOUI_ADDR_MODE 0x01
56
57#define FCOE_WORD_TO_BYTE 4
58
59MODULE_AUTHOR("Open-FCoE.org");
60MODULE_DESCRIPTION("FCoE");
61MODULE_LICENSE("GPL");
62
63/* fcoe host list */
64LIST_HEAD(fcoe_hostlist);
65DEFINE_RWLOCK(fcoe_hostlist_lock);
66DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
67struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
68
69
70/* Function Prototypes */
71static int fcoe_check_wait_queue(struct fc_lport *);
72static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
73static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
74static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
75#ifdef CONFIG_HOTPLUG_CPU
76static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
77#endif /* CONFIG_HOTPLUG_CPU */
78static int fcoe_device_notification(struct notifier_block *, ulong, void *);
79static void fcoe_dev_setup(void);
80static void fcoe_dev_cleanup(void);
81
82/* notification function from net device */
83static struct notifier_block fcoe_notifier = {
84 .notifier_call = fcoe_device_notification,
85};
86
87
88#ifdef CONFIG_HOTPLUG_CPU
89static struct notifier_block fcoe_cpu_notifier = {
90 .notifier_call = fcoe_cpu_callback,
91};
92
93/**
94 * fcoe_create_percpu_data - creates the associated cpu data
95 * @cpu: index for the cpu where fcoe cpu data will be created
96 *
97 * create percpu stats block, from cpu add notifier
98 *
99 * Returns: none
100 **/
101static void fcoe_create_percpu_data(int cpu)
102{
103 struct fc_lport *lp;
104 struct fcoe_softc *fc;
105
106 write_lock_bh(&fcoe_hostlist_lock);
107 list_for_each_entry(fc, &fcoe_hostlist, list) {
108 lp = fc->lp;
109 if (lp->dev_stats[cpu] == NULL)
110 lp->dev_stats[cpu] =
111 kzalloc(sizeof(struct fcoe_dev_stats),
112 GFP_KERNEL);
113 }
114 write_unlock_bh(&fcoe_hostlist_lock);
115}
116
117/**
118 * fcoe_destroy_percpu_data - destroys the associated cpu data
119 * @cpu: index for the cpu whose fcoe cpu data will be destroyed
120 *
121 * destroy percpu stats block called by cpu add/remove notifier
122 *
123 * Returns: none
124 **/
125static void fcoe_destroy_percpu_data(int cpu)
126{
127 struct fc_lport *lp;
128 struct fcoe_softc *fc;
129
130 write_lock_bh(&fcoe_hostlist_lock);
131 list_for_each_entry(fc, &fcoe_hostlist, list) {
132 lp = fc->lp;
133 kfree(lp->dev_stats[cpu]);
134 lp->dev_stats[cpu] = NULL;
135 }
136 write_unlock_bh(&fcoe_hostlist_lock);
137}
138
139/**
140 * fcoe_cpu_callback - fcoe cpu hotplug event callback
141 * @nfb: callback data block
142 * @action: event triggering the callback
143 * @hcpu: index for the cpu of this event
144 *
145 * this creates or destroys per cpu data for fcoe
146 *
147 * Returns NOTIFY_OK always.
148 **/
149static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
150 void *hcpu)
151{
152 unsigned int cpu = (unsigned long)hcpu;
153
154 switch (action) {
155 case CPU_ONLINE:
156 fcoe_create_percpu_data(cpu);
157 break;
158 case CPU_DEAD:
159 fcoe_destroy_percpu_data(cpu);
160 break;
161 default:
162 break;
163 }
164 return NOTIFY_OK;
165}
166#endif /* CONFIG_HOTPLUG_CPU */
167
168/**
169 * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
170 * @skb: the receive skb
171 * @dev: associated net device
172 * @ptype: context
173 * @olddev: last device
174 *
175 * this function receives the packet, builds an fc frame and passes it up
176 *
177 * Returns: 0 for success
178 **/
179int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
180 struct packet_type *ptype, struct net_device *olddev)
181{
182 struct fc_lport *lp;
183 struct fcoe_rcv_info *fr;
184 struct fcoe_softc *fc;
185 struct fcoe_dev_stats *stats;
186 struct fc_frame_header *fh;
187 unsigned short oxid;
188 int cpu_idx;
189 struct fcoe_percpu_s *fps;
190
191 fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
192 lp = fc->lp;
193 if (unlikely(lp == NULL)) {
194 FC_DBG("cannot find hba structure");
195 goto err2;
196 }
197
198 if (unlikely(debug_fcoe)) {
199 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
200 "end:%p sum:%d dev:%s", skb->len, skb->data_len,
201 skb->head, skb->data, skb_tail_pointer(skb),
202 skb_end_pointer(skb), skb->csum,
203 skb->dev ? skb->dev->name : "<NULL>");
204
205 }
206
207 /* check for FCOE packet type */
208 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
209 FC_DBG("wrong FC type frame");
210 goto err;
211 }
212
213 /*
214 * Check for minimum frame length, and make sure required FCoE
215 * and FC headers are pulled into the linear data area.
216 */
217 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
218 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
219 goto err;
220
221 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
222 fh = (struct fc_frame_header *) skb_transport_header(skb);
223
224 oxid = ntohs(fh->fh_ox_id);
225
226 fr = fcoe_dev_from_skb(skb);
227 fr->fr_dev = lp;
228 fr->ptype = ptype;
229 cpu_idx = 0;
230#ifdef CONFIG_SMP
231 /*
232 * The incoming frame exchange id (oxid) is ANDed with the number of
233 * online cpus minus one to get cpu_idx, which is then used to select
234 * a per cpu kernel thread from fcoe_percpu. If that cpu is offline,
235 * or has no kernel thread, cpu_idx falls back to the first online
236 * cpu index.
237 */
238 cpu_idx = oxid & (num_online_cpus() - 1);
239 if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
240 cpu_idx = first_cpu(cpu_online_map);
241#endif
242 fps = fcoe_percpu[cpu_idx];
243
244 spin_lock_bh(&fps->fcoe_rx_list.lock);
245 __skb_queue_tail(&fps->fcoe_rx_list, skb);
246 if (fps->fcoe_rx_list.qlen == 1)
247 wake_up_process(fps->thread);
248
249 spin_unlock_bh(&fps->fcoe_rx_list.lock);
250
251 return 0;
252err:
253#ifdef CONFIG_SMP
254 stats = lp->dev_stats[smp_processor_id()];
255#else
256 stats = lp->dev_stats[0];
257#endif
258 if (stats)
259 stats->ErrorFrames++;
260
261err2:
262 kfree_skb(skb);
263 return -1;
264}
265EXPORT_SYMBOL_GPL(fcoe_rcv);
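
A small userspace sketch of the rx-thread selection above; note that
masking with (num_online_cpus() - 1) only spreads exchanges evenly when
the online cpu count is a power of two (with 6 cpus the mask 5, binary
101, can only select cpus 0, 1, 4 and 5):

	#include <stdio.h>

	int main(void)
	{
		unsigned short oxid;
		int num_online = 4;	/* hypothetical online cpu count */

		for (oxid = 0x1000; oxid < 0x1008; oxid++)
			printf("oxid 0x%04x -> cpu %d\n", oxid,
			       oxid & (num_online - 1));
		return 0;
	}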
266
267/**
268 * fcoe_start_io - pass to netdev to start xmit for fcoe
269 * @skb: the skb to be xmitted
270 *
271 * Returns: 0 for success
272 **/
273static inline int fcoe_start_io(struct sk_buff *skb)
274{
275 int rc;
276
277 skb_get(skb);
278 rc = dev_queue_xmit(skb);
279 if (rc != 0)
280 return rc;
281 kfree_skb(skb);
282 return 0;
283}
284
285/**
286 * fcoe_get_paged_crc_eof - in case we need alloc a page for crc_eof
287 * @skb: the skb to be xmitted
288 * @tlen: total len
289 *
290 * Returns: 0 for success
291 **/
292static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
293{
294 struct fcoe_percpu_s *fps;
295 struct page *page;
296 int cpu_idx;
297
298 cpu_idx = get_cpu();
299 fps = fcoe_percpu[cpu_idx];
300 page = fps->crc_eof_page;
301 if (!page) {
302 page = alloc_page(GFP_ATOMIC);
303 if (!page) {
304 put_cpu();
305 return -ENOMEM;
306 }
307 fps->crc_eof_page = page;
308 WARN_ON(fps->crc_eof_offset != 0);
309 }
310
311 get_page(page);
312 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
313 fps->crc_eof_offset, tlen);
314 skb->len += tlen;
315 skb->data_len += tlen;
316 skb->truesize += tlen;
317 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
318
319 if (fps->crc_eof_offset >= PAGE_SIZE) {
320 fps->crc_eof_page = NULL;
321 fps->crc_eof_offset = 0;
322 put_page(page);
323 }
324 put_cpu();
325 return 0;
326}
327
328/**
329 * fcoe_fc_crc - calculates FC CRC in this fcoe skb
330 * @fp: the fc_frame containing data to be checksummed
331 *
332 * This uses crc32() to calculate the crc for the fc frame
333 * Return : 32 bit crc
334 *
335 **/
336u32 fcoe_fc_crc(struct fc_frame *fp)
337{
338 struct sk_buff *skb = fp_skb(fp);
339 struct skb_frag_struct *frag;
340 unsigned char *data;
341 unsigned long off, len, clen;
342 u32 crc;
343 unsigned i;
344
345 crc = crc32(~0, skb->data, skb_headlen(skb));
346
347 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
348 frag = &skb_shinfo(skb)->frags[i];
349 off = frag->page_offset;
350 len = frag->size;
351 while (len > 0) {
352 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
353 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
354 KM_SKB_DATA_SOFTIRQ);
355 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
356 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
357 off += clen;
358 len -= clen;
359 }
360 }
361 return crc;
362}
363EXPORT_SYMBOL_GPL(fcoe_fc_crc);
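
The fragment walk above feeds the CRC one mapped chunk at a time;
computing a CRC piecewise gives the same result as one pass over a
linear buffer. A userspace analogue using zlib's crc32() (zlib's seeding
differs from the kernel's crc32(~0, ...), but the incremental property
is the same); build with -lz:

	#include <stdio.h>
	#include <zlib.h>

	int main(void)
	{
		unsigned char buf[64];
		uLong whole, piecewise;
		int i;

		for (i = 0; i < 64; i++)
			buf[i] = (unsigned char)i;

		/* one pass over the whole buffer */
		whole = crc32(crc32(0L, Z_NULL, 0), buf, 64);

		/* same data fed in 16-byte "fragments" */
		piecewise = crc32(0L, Z_NULL, 0);
		for (i = 0; i < 64; i += 16)
			piecewise = crc32(piecewise, buf + i, 16);

		printf("%lx == %lx\n", whole, piecewise);
		return 0;
	}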
364
365/**
366 * fcoe_xmit - FCoE frame transmit function
367 * @lp: the associated local port
368 * @fp: the fc_frame to be transmitted
369 *
370 * Return : 0 for success
371 *
372 **/
373int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
374{
375 int wlen, rc = 0;
376 u32 crc;
377 struct ethhdr *eh;
378 struct fcoe_crc_eof *cp;
379 struct sk_buff *skb;
380 struct fcoe_dev_stats *stats;
381 struct fc_frame_header *fh;
382 unsigned int hlen; /* header length implies the version */
383 unsigned int tlen; /* trailer length */
384 unsigned int elen; /* eth header, may include vlan */
385 int flogi_in_progress = 0;
386 struct fcoe_softc *fc;
387 u8 sof, eof;
388 struct fcoe_hdr *hp;
389
390 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
391
392 fc = fcoe_softc(lp);
393 /*
394 * if it is a flogi then we need to learn gw-addr
395 * and my own fcid
396 */
397 fh = fc_frame_header_get(fp);
398 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
399 if (fc_frame_payload_op(fp) == ELS_FLOGI) {
400 fc->flogi_oxid = ntohs(fh->fh_ox_id);
401 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
402 fc->flogi_progress = 1;
403 flogi_in_progress = 1;
404 } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
405 /*
406 * Here we must've gotten an SID by accepting an FLOGI
407 * from a point-to-point connection. Switch to using
408 * the source mac based on the SID. The destination
409 * MAC in this case would have been set by receiving the
410 * FLOGI.
411 */
412 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
413 fc->flogi_progress = 0;
414 }
415 }
416
417 skb = fp_skb(fp);
418 sof = fr_sof(fp);
419 eof = fr_eof(fp);
420
421 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
422 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
423 hlen = sizeof(struct fcoe_hdr);
424 tlen = sizeof(struct fcoe_crc_eof);
425 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
426
427 /* crc offload */
428 if (likely(lp->crc_offload)) {
429 skb->ip_summed = CHECKSUM_COMPLETE;
430 skb->csum_start = skb_headroom(skb);
431 skb->csum_offset = skb->len;
432 crc = 0;
433 } else {
434 skb->ip_summed = CHECKSUM_NONE;
435 crc = fcoe_fc_crc(fp);
436 }
437
438 /* copy fc crc and eof to the skb buff */
439 if (skb_is_nonlinear(skb)) {
440 skb_frag_t *frag;
441 if (fcoe_get_paged_crc_eof(skb, tlen)) {
442 kfree_skb(skb); /* skbs must be freed with kfree_skb() */
443 return -ENOMEM;
444 }
445 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
446 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
447 + frag->page_offset;
448 } else {
449 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
450 }
451
452 memset(cp, 0, sizeof(*cp));
453 cp->fcoe_eof = eof;
454 cp->fcoe_crc32 = cpu_to_le32(~crc);
455
456 if (skb_is_nonlinear(skb)) {
457 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
458 cp = NULL;
459 }
460
461 /* adjust skb network/transport offsets to match mac/fcoe/fc */
462 skb_push(skb, elen + hlen);
463 skb_reset_mac_header(skb);
464 skb_reset_network_header(skb);
465 skb->mac_len = elen;
466 skb->protocol = htons(ETH_P_802_3);
467 skb->dev = fc->real_dev;
468
469 /* fill up mac and fcoe headers */
470 eh = eth_hdr(skb);
471 eh->h_proto = htons(ETH_P_FCOE);
472 if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
473 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
474 else
475 /* insert GW address */
476 memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
477
478 if (unlikely(flogi_in_progress))
479 memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
480 else
481 memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
482
483 hp = (struct fcoe_hdr *)(eh + 1);
484 memset(hp, 0, sizeof(*hp));
485 if (FC_FCOE_VER)
486 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
487 hp->fcoe_sof = sof;
488
489 /* update tx stats: regardless if LLD fails */
490 stats = lp->dev_stats[smp_processor_id()];
491 if (stats) {
492 stats->TxFrames++;
493 stats->TxWords += wlen;
494 }
495
496 /* send down to lld */
497 fr_dev(fp) = lp;
498 if (fc->fcoe_pending_queue.qlen)
499 rc = fcoe_check_wait_queue(lp);
500
501 if (rc == 0)
502 rc = fcoe_start_io(skb);
503
504 if (rc) {
505 fcoe_insert_wait_queue(lp, skb);
506 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
507 fc_pause(lp);
508 }
509
510 return 0;
511}
512EXPORT_SYMBOL_GPL(fcoe_xmit);
513
514/*
515 * fcoe_percpu_receive_thread - recv thread per cpu
516 * @arg: ptr to the fcoe per cpu struct
517 *
518 * Return: 0 for success
519 *
520 */
521int fcoe_percpu_receive_thread(void *arg)
522{
523 struct fcoe_percpu_s *p = arg;
524 u32 fr_len;
525 struct fc_lport *lp;
526 struct fcoe_rcv_info *fr;
527 struct fcoe_dev_stats *stats;
528 struct fc_frame_header *fh;
529 struct sk_buff *skb;
530 struct fcoe_crc_eof crc_eof;
531 struct fc_frame *fp;
532 u8 *mac = NULL;
533 struct fcoe_softc *fc;
534 struct fcoe_hdr *hp;
535
536 set_user_nice(current, 19);
537
538 while (!kthread_should_stop()) {
539
540 spin_lock_bh(&p->fcoe_rx_list.lock);
541 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
542 set_current_state(TASK_INTERRUPTIBLE);
543 spin_unlock_bh(&p->fcoe_rx_list.lock);
544 schedule();
545 set_current_state(TASK_RUNNING);
546 if (kthread_should_stop())
547 return 0;
548 spin_lock_bh(&p->fcoe_rx_list.lock);
549 }
550 spin_unlock_bh(&p->fcoe_rx_list.lock);
551 fr = fcoe_dev_from_skb(skb);
552 lp = fr->fr_dev;
553 if (unlikely(lp == NULL)) {
554 FC_DBG("invalid HBA Structure");
555 kfree_skb(skb);
556 continue;
557 }
558
559 stats = lp->dev_stats[smp_processor_id()];
560
561 if (unlikely(debug_fcoe)) {
562 FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
563 "tail:%p end:%p sum:%d dev:%s",
564 skb->len, skb->data_len,
565 skb->head, skb->data, skb_tail_pointer(skb),
566 skb_end_pointer(skb), skb->csum,
567 skb->dev ? skb->dev->name : "<NULL>");
568 }
569
570 /*
571 * Save source MAC address before discarding header.
572 */
573 fc = lport_priv(lp);
574 if (unlikely(fc->flogi_progress))
575 mac = eth_hdr(skb)->h_source;
576
577 if (skb_is_nonlinear(skb))
578 skb_linearize(skb); /* not ideal */
579
580 /*
581 * Frame length checks and setting up the header pointers
582 * was done in fcoe_rcv already.
583 */
584 hp = (struct fcoe_hdr *) skb_network_header(skb);
585 fh = (struct fc_frame_header *) skb_transport_header(skb);
586
587 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
588 if (stats) {
589 if (stats->ErrorFrames < 5)
590 FC_DBG("unknown FCoE version %x",
591 FC_FCOE_DECAPS_VER(hp));
592 stats->ErrorFrames++;
593 }
594 kfree_skb(skb);
595 continue;
596 }
597
598 skb_pull(skb, sizeof(struct fcoe_hdr));
599 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
600
601 if (stats) {
602 stats->RxFrames++;
603 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
604 }
605
606 fp = (struct fc_frame *)skb;
607 fc_frame_init(fp);
608 fr_dev(fp) = lp;
609 fr_sof(fp) = hp->fcoe_sof;
610
611 /* Copy out the CRC and EOF trailer for access */
612 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
613 kfree_skb(skb);
614 continue;
615 }
616 fr_eof(fp) = crc_eof.fcoe_eof;
617 fr_crc(fp) = crc_eof.fcoe_crc32;
618 if (pskb_trim(skb, fr_len)) {
619 kfree_skb(skb);
620 continue;
621 }
622
623 /*
624 * We only check CRC if no offload is available and if it is
625 * it's solicited data, in which case, the FCP layer would
626 * check it during the copy.
627 */
628 if (lp->crc_offload)
629 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
630 else
631 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
632
633 fh = fc_frame_header_get(fp);
634 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
635 fh->fh_type == FC_TYPE_FCP) {
636 fc_exch_recv(lp, lp->emp, fp);
637 continue;
638 }
639 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
640 if (le32_to_cpu(fr_crc(fp)) !=
641 ~crc32(~0, skb->data, fr_len)) {
642 if (debug_fcoe || stats->InvalidCRCCount < 5)
643 printk(KERN_WARNING "fcoe: dropping "
644 "frame with CRC error\n");
645 stats->InvalidCRCCount++;
646 stats->ErrorFrames++;
647 fc_frame_free(fp);
648 continue;
649 }
650 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
651 }
652 /* non flogi and non data exchanges are handled here */
653 if (unlikely(fc->flogi_progress))
654 fcoe_recv_flogi(fc, fp, mac);
655 fc_exch_recv(lp, lp->emp, fp);
656 }
657 return 0;
658}
659
660/**
661 * fcoe_recv_flogi - flogi receive function
662 * @fc: associated fcoe_softc
663 * @fp: the received frame
664 * @sa: the source address of this flogi
665 *
666 * This parses the flogi response and sets the corresponding mac
667 * address for the initiator, either OUI based or GW based.
668 *
669 * Returns: none
670 **/
671static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
672{
673 struct fc_frame_header *fh;
674 u8 op;
675
676 fh = fc_frame_header_get(fp);
677 if (fh->fh_type != FC_TYPE_ELS)
678 return;
679 op = fc_frame_payload_op(fp);
680 if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
681 fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
682 /*
683 * FLOGI accepted.
684 * If the src mac addr is FC_OUI-based, then we mark the
685 * address_mode flag to use FC_OUI-based Ethernet DA.
686 * Otherwise we use the FCoE gateway addr
687 */
688 if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
689 fc->address_mode = FCOE_FCOUI_ADDR_MODE;
690 } else {
691 memcpy(fc->dest_addr, sa, ETH_ALEN);
692 fc->address_mode = FCOE_GW_ADDR_MODE;
693 }
694
695 /*
696 * Remove any previously-set unicast MAC filter.
697 * Add secondary FCoE MAC address filter for our OUI.
698 */
699 rtnl_lock();
700 if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
701 dev_unicast_delete(fc->real_dev, fc->data_src_addr,
702 ETH_ALEN);
703 fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
704 dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
705 rtnl_unlock();
706
707 fc->flogi_progress = 0;
708 } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
709 /*
710 * Save source MAC for point-to-point responses.
711 */
712 memcpy(fc->dest_addr, sa, ETH_ALEN);
713 fc->address_mode = FCOE_GW_ADDR_MODE;
714 }
715}
716
717/**
718 * fcoe_watchdog - fcoe timer callback
719 * @vp: unused timer argument
720 *
721 * This checks the pending queue length for fcoe and puts fcoe into a paused
722 * state if FCOE_MAX_QUEUE_DEPTH is reached. This is done for every fc_lport
723 * on the fcoe_hostlist.
724 *
725 * Returns: none
726 **/
727void fcoe_watchdog(ulong vp)
728{
729 struct fc_lport *lp;
730 struct fcoe_softc *fc;
731 int paused = 0;
732
733 read_lock(&fcoe_hostlist_lock);
734 list_for_each_entry(fc, &fcoe_hostlist, list) {
735 lp = fc->lp;
736 if (lp) {
737 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
738 paused = 1;
739 if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
740 if (paused)
741 fc_unpause(lp);
742 }
743 }
744 }
745 read_unlock(&fcoe_hostlist_lock);
746
747 fcoe_timer.expires = jiffies + (1 * HZ);
748 add_timer(&fcoe_timer);
749}
750
751
752/**
753 * fcoe_check_wait_queue - drain the fcoe pending xmit queue
754 * @lp: the fc_lport whose pending queue is checked
755 *
756 * This dequeues each skb at the head of the wait_queue and calls
757 * fcoe_start_io() for it; if all skbs are transmitted the queue
758 * empties. If an error occurs, the skb is put back at the head of
759 * the wait_queue to be retried later.
760 *
761 * The wait_queue is used when an skb transmit fails. The skb goes
762 * into the wait_queue, which is emptied by the timer function OR
763 * by the next skb transmit.
764 *
765 * Returns: the remaining queue length
767 **/
768static int fcoe_check_wait_queue(struct fc_lport *lp)
769{
770 int rc, unpause = 0;
771 int paused = 0;
772 struct sk_buff *skb;
773 struct fcoe_softc *fc;
774
775 fc = fcoe_softc(lp);
776 spin_lock_bh(&fc->fcoe_pending_queue.lock);
777
778 /*
779 * is this interface paused?
780 */
781 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
782 paused = 1;
783 if (fc->fcoe_pending_queue.qlen) {
784 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
785 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
786 rc = fcoe_start_io(skb);
787 if (rc) {
788 fcoe_insert_wait_queue_head(lp, skb);
789 return rc;
790 }
791 spin_lock_bh(&fc->fcoe_pending_queue.lock);
792 }
793 if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
794 unpause = 1;
795 }
796 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
797 if ((unpause) && (paused))
798 fc_unpause(lp);
799 return fc->fcoe_pending_queue.qlen;
800}
801
802/**
803 * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
804 * @lp: the fc_port for this skb
805 * @skb: the associated skb to be xmitted
806 *
807 * Returns: none
808 **/
809static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
810 struct sk_buff *skb)
811{
812 struct fcoe_softc *fc;
813
814 fc = fcoe_softc(lp);
815 spin_lock_bh(&fc->fcoe_pending_queue.lock);
816 __skb_queue_head(&fc->fcoe_pending_queue, skb);
817 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
818}
819
820/**
821 * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
822 * @lp: the fc_port for this skb
823 * @skb: the associated skb to be xmitted
824 *
825 * Returns: none
826 **/
827static void fcoe_insert_wait_queue(struct fc_lport *lp,
828 struct sk_buff *skb)
829{
830 struct fcoe_softc *fc;
831
832 fc = fcoe_softc(lp);
833 spin_lock_bh(&fc->fcoe_pending_queue.lock);
834 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
835 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
836}
837
838/**
839 * fcoe_dev_setup - setup link change notification interface
840 *
841 **/
842static void fcoe_dev_setup(void)
843{
844 /*
845 * here setup a interface specific wd time to
846 * monitor the link state
847 */
848 register_netdevice_notifier(&fcoe_notifier);
849}
850
851/**
852 * fcoe_dev_setup - cleanup link change notification interface
853 **/
854static void fcoe_dev_cleanup(void)
855{
856 unregister_netdevice_notifier(&fcoe_notifier);
857}
858
859/**
860 * fcoe_device_notification - netdev event notification callback
861 * @notifier: context of the notification
862 * @event: type of event
863 * @ptr: the net_device the event applies to
864 *
865 * This function is called by the ethernet driver in case of link change event
866 *
867 * Returns: NOTIFY_OK or NOTIFY_DONE
868 **/
869static int fcoe_device_notification(struct notifier_block *notifier,
870 ulong event, void *ptr)
871{
872 struct fc_lport *lp = NULL;
873 struct net_device *real_dev = ptr;
874 struct fcoe_softc *fc;
875 struct fcoe_dev_stats *stats;
876 u16 new_status;
877 u32 mfs;
878 int rc = NOTIFY_OK;
879
880 read_lock(&fcoe_hostlist_lock);
881 list_for_each_entry(fc, &fcoe_hostlist, list) {
882 if (fc->real_dev == real_dev) {
883 lp = fc->lp;
884 break;
885 }
886 }
887 read_unlock(&fcoe_hostlist_lock);
888 if (lp == NULL) {
889 rc = NOTIFY_DONE;
890 goto out;
891 }
892
893 new_status = lp->link_status;
894 switch (event) {
895 case NETDEV_DOWN:
896 case NETDEV_GOING_DOWN:
897 new_status &= ~FC_LINK_UP;
898 break;
899 case NETDEV_UP:
900 case NETDEV_CHANGE:
901 new_status &= ~FC_LINK_UP;
902 if (!fcoe_link_ok(lp))
903 new_status |= FC_LINK_UP;
904 break;
905 case NETDEV_CHANGEMTU:
906 mfs = fc->real_dev->mtu -
907 (sizeof(struct fcoe_hdr) +
908 sizeof(struct fcoe_crc_eof));
909 if (mfs >= FC_MIN_MAX_FRAME)
910 fc_set_mfs(lp, mfs);
911 new_status &= ~FC_LINK_UP;
912 if (!fcoe_link_ok(lp))
913 new_status |= FC_LINK_UP;
914 break;
915 case NETDEV_REGISTER:
916 break;
917 default:
918 FC_DBG("unknown event %ld call", event);
919 }
920 if (lp->link_status != new_status) {
921 if ((new_status & FC_LINK_UP) == FC_LINK_UP)
922 fc_linkup(lp);
923 else {
924 stats = lp->dev_stats[smp_processor_id()];
925 if (stats)
926 stats->LinkFailureCount++;
927 fc_linkdown(lp);
928 fcoe_clean_pending_queue(lp);
929 }
930 }
931out:
932 return rc;
933}
934
935/**
936 * fcoe_if_to_netdev - parse a name buffer to get netdev
937 * @buffer: incoming buffer holding the interface name;
938 * trailing newlines are stripped
939 *
940 * Returns: NULL or ptr to the net_device
941 **/
942static struct net_device *fcoe_if_to_netdev(const char *buffer)
943{
944 char *cp;
945 char ifname[IFNAMSIZ + 2];
946
947 if (buffer) {
948 strlcpy(ifname, buffer, IFNAMSIZ);
949 cp = ifname + strlen(ifname);
950 while (--cp >= ifname && *cp == '\n')
951 *cp = '\0';
952 return dev_get_by_name(&init_net, ifname);
953 }
954 return NULL;
955}
956
957/**
958 * fcoe_netdev_to_module_owner - finds the nic driver module of the netdev
959 * @netdev: the target netdev
960 *
961 * Returns: ptr to the struct module, NULL for failure
962 **/
963static struct module *fcoe_netdev_to_module_owner(
964 const struct net_device *netdev)
965{
966 struct device *dev;
967
968 if (!netdev)
969 return NULL;
970
971 dev = netdev->dev.parent;
972 if (!dev)
973 return NULL;
974
975 if (!dev->driver)
976 return NULL;
977
978 return dev->driver->owner;
979}
980
981/**
982 * fcoe_ethdrv_get - holds the nic driver module by try_module_get() for
983 * the corresponding netdev.
984 * @netdev: the target netdev
985 *
986 * Returns: nonzero for success, -ENODEV if there is no owner module
987 **/
988static int fcoe_ethdrv_get(const struct net_device *netdev)
989{
990 struct module *owner;
991
992 owner = fcoe_netdev_to_module_owner(netdev);
993 if (owner) {
994 printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
995 module_name(owner), netdev->name);
996 return try_module_get(owner);
997 }
998 return -ENODEV;
999}
1000
1001/**
1002 * fcoe_ethdrv_put - releases the nic driver module by module_put for
1003 * the corresponding netdev.
1004 * @netdev: the target netdev
1005 *
1006 * Returns: 0 for success
1007 **/
1008static int fcoe_ethdrv_put(const struct net_device *netdev)
1009{
1010 struct module *owner;
1011
1012 owner = fcoe_netdev_to_module_owner(netdev);
1013 if (owner) {
1014 printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
1015 module_name(owner), netdev->name);
1016 module_put(owner);
1017 return 0;
1018 }
1019 return -ENODEV;
1020}
1021
1022/**
1023 * fcoe_destroy - handles the destroy call from sysfs
1024 * @buffer: expected to be an eth if name
1025 * @kp: associated kernel param
1026 *
1027 * Returns: 0 for success
1028 **/
1029static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1030{
1031 int rc;
1032 struct net_device *netdev;
1033
1034 netdev = fcoe_if_to_netdev(buffer);
1035 if (!netdev) {
1036 rc = -ENODEV;
1037 goto out_nodev;
1038 }
1039 /* look for existing lport */
1040 if (!fcoe_hostlist_lookup(netdev)) {
1041 rc = -ENODEV;
1042 goto out_putdev;
1043 }
1044 /* pass to transport */
1045 rc = fcoe_transport_release(netdev);
1046 if (rc) {
1047 printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
1048 netdev->name);
1049 rc = -EIO;
1050 goto out_putdev;
1051 }
1052 fcoe_ethdrv_put(netdev);
1053 rc = 0;
1054out_putdev:
1055 dev_put(netdev);
1056out_nodev:
1057 return rc;
1058}
1059
1060/**
1061 * fcoe_create - handles the create call from sysfs
1062 * @buffer: expected to be an eth if name
1063 * @kp: associated kernel param
1064 *
1065 * Returns: 0 for success
1066 **/
1067static int fcoe_create(const char *buffer, struct kernel_param *kp)
1068{
1069 int rc;
1070 struct net_device *netdev;
1071
1072 netdev = fcoe_if_to_netdev(buffer);
1073 if (!netdev) {
1074 rc = -ENODEV;
1075 goto out_nodev;
1076 }
1077 /* look for existing lport */
1078 if (fcoe_hostlist_lookup(netdev)) {
1079 rc = -EEXIST;
1080 goto out_putdev;
1081 }
1082 fcoe_ethdrv_get(netdev);
1083
1084 /* pass to transport */
1085 rc = fcoe_transport_attach(netdev);
1086 if (rc) {
1087 printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
1088 netdev->name);
1089 fcoe_ethdrv_put(netdev);
1090 rc = -EIO;
1091 goto out_putdev;
1092 }
1093 rc = 0;
1094out_putdev:
1095 dev_put(netdev);
1096out_nodev:
1097 return rc;
1098}
1099
1100module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1101__MODULE_PARM_TYPE(create, "string");
1102MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1103module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1104__MODULE_PARM_TYPE(destroy, "string");
1105MODULE_PARM_DESC(destroy, "Destroy fcoe port");
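
With these writable parameters, an instance can be managed from user
space; assuming the module is named fcoe, the parameters appear under
/sys/module/fcoe/parameters:

	echo eth0 > /sys/module/fcoe/parameters/create
	echo eth0 > /sys/module/fcoe/parameters/destroy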
1106
1107/*
1108 * fcoe_link_ok - check if link is ok for the fc_lport
1109 * @lp: ptr to the fc_lport
1110 *
1111 * Any permanently-disqualifying conditions have been previously checked.
1112 * This also updates the speed setting, which may change with link for 100/1000.
1113 *
1114 * This function should probably be checking for PAUSE support at some point
1115 * in the future. Currently Per-priority-pause is not determinable using
1116 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1117 *
1118 * Returns: 0 if link is OK for use by FCoE.
1119 *
1120 */
1121int fcoe_link_ok(struct fc_lport *lp)
1122{
1123 struct fcoe_softc *fc = fcoe_softc(lp);
1124 struct net_device *dev = fc->real_dev;
1125 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1126 int rc = 0;
1127
1128 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1129 dev = fc->phys_dev;
1130 if (dev->ethtool_ops->get_settings) {
1131 dev->ethtool_ops->get_settings(dev, &ecmd);
1132 lp->link_supported_speeds &=
1133 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1134 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1135 SUPPORTED_1000baseT_Full))
1136 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1137 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1138 lp->link_supported_speeds |=
1139 FC_PORTSPEED_10GBIT;
1140 if (ecmd.speed == SPEED_1000)
1141 lp->link_speed = FC_PORTSPEED_1GBIT;
1142 if (ecmd.speed == SPEED_10000)
1143 lp->link_speed = FC_PORTSPEED_10GBIT;
1144 }
1145 } else
1146 rc = -1;
1147
1148 return rc;
1149}
1150EXPORT_SYMBOL_GPL(fcoe_link_ok);
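/*
 * Caller sketch (an illustration, not part of the original source): a
 * transport typically gates bringing the lport up on this check, since
 * fcoe_link_ok() returns 0 when the link is usable:
 *
 *	if (!fcoe_link_ok(lp))
 *		fc_linkup(lp);
 */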
1151
1152/*
1153 * fcoe_percpu_clean - frees skbs of the given lport from every per-cpu
1154 * receive queue.
1155 * @lp: the fc_lport
1156 */
1157void fcoe_percpu_clean(struct fc_lport *lp)
1158{
1159 int idx;
1160 struct fcoe_percpu_s *pp;
1161 struct fcoe_rcv_info *fr;
1162 struct sk_buff_head *list;
1163 struct sk_buff *skb, *next;
1164 struct sk_buff *head;
1165
1166 for (idx = 0; idx < NR_CPUS; idx++) {
1167 if (fcoe_percpu[idx]) {
1168 pp = fcoe_percpu[idx];
1169 spin_lock_bh(&pp->fcoe_rx_list.lock);
1170 list = &pp->fcoe_rx_list;
1171 head = list->next;
1172 for (skb = head; skb != (struct sk_buff *)list;
1173 skb = next) {
1174 next = skb->next;
1175 fr = fcoe_dev_from_skb(skb);
1176 if (fr->fr_dev == lp) {
1177 __skb_unlink(skb, list);
1178 kfree_skb(skb);
1179 }
1180 }
1181 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1182 }
1183 }
1184}
1185EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1186
1187/**
1188 * fcoe_clean_pending_queue - dequeues and frees all skbs on the lport's pending queue
1189 * @lp: the corresponding fc_lport
1190 *
1191 * Returns: none
1192 **/
1193void fcoe_clean_pending_queue(struct fc_lport *lp)
1194{
1195 struct fcoe_softc *fc = lport_priv(lp);
1196 struct sk_buff *skb;
1197
1198 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1199 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1200 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1201 kfree_skb(skb);
1202 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1203 }
1204 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1205}
1206EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1207
1208/**
1209 * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
1210 * @sht: ptr to the scsi host template
1211 * @priv_size: size of private data after fc_lport
1212 *
1213 * Returns: ptr to Scsi_Host
1214 * TODO - to libfc?
1215 */
1216static inline struct Scsi_Host *libfc_host_alloc(
1217 struct scsi_host_template *sht, int priv_size)
1218{
1219 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
1220}
1221
1222/**
1223 * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
1224 * @sht: ptr to the scsi host template
1225 * @priv_size: size of private data after fc_lport
1226 *
1227 * Returns: ptr to Scsi_Host
1228 */
1229struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
1230{
1231 return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
1232}
1233EXPORT_SYMBOL_GPL(fcoe_host_alloc);
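/*
 * Allocation sketch (hypothetical LLD, not part of the original source):
 * a driver with private data of type struct example_priv laid out after
 * the fcoe_softc would allocate its host like this; lport_priv() then
 * returns the fcoe_softc placed after the fc_lport.
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = fcoe_host_alloc(&example_sht, sizeof(struct example_priv));
 *	if (!shost)
 *		return -ENOMEM;
 */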
1234
1235/*
1236 * fcoe_reset - resets the fc_lport associated with the given Scsi_Host
1237 * @shost: shost the reset is from
1238 *
1239 * Returns: always 0
1240 */
1241int fcoe_reset(struct Scsi_Host *shost)
1242{
1243 struct fc_lport *lport = shost_priv(shost);
1244 fc_lport_reset(lport);
1245 return 0;
1246}
1247EXPORT_SYMBOL_GPL(fcoe_reset);
1248
1249/*
1250 * fcoe_wwn_from_mac - converts a 48-bit IEEE MAC address to a 64-bit FC WWN.
1251 * @mac: mac address
1252 * @scheme: WWN naming scheme; 1 requires @port to be 0, 2 encodes @port
1253 * @port: port indicator, placed in bits 59:48 of the WWN when scheme is 2
1254 *
1255 * Returns: u64 fc world wide name
1256 */
1257u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
1258 unsigned int scheme, unsigned int port)
1259{
1260 u64 wwn;
1261 u64 host_mac;
1262
1263 /* The MAC is in network order (NO); pack it into the low 48 bits */
1264 host_mac = ((u64) mac[0] << 40) |
1265 ((u64) mac[1] << 32) |
1266 ((u64) mac[2] << 24) |
1267 ((u64) mac[3] << 16) |
1268 ((u64) mac[4] << 8) |
1269 (u64) mac[5];
1270
1271 WARN_ON(host_mac >= (1ULL << 48));
1272 wwn = host_mac | ((u64) scheme << 60);
1273 switch (scheme) {
1274 case 1:
1275 WARN_ON(port != 0);
1276 break;
1277 case 2:
1278 WARN_ON(port >= 0xfff);
1279 wwn |= (u64) port << 48;
1280 break;
1281 default:
1282 WARN_ON(1);
1283 break;
1284 }
1285
1286 return wwn;
1287}
1288EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
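/*
 * Worked example (derived from the code above, not in the original
 * source): for MAC 00:0e:1e:aa:bb:cc, host_mac is 0x000e1eaabbcc, so
 *
 *	scheme 1, port 0:  wwn = 0x1000000e1eaabbcc
 *	scheme 2, port 1:  wwn = 0x2001000e1eaabbcc
 *
 * with the scheme occupying bits 63:60 and, for scheme 2, the port
 * indicator occupying bits 59:48.
 */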
1289/*
1290 * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device
1291 * @dev: ptr to the net_device
1292 *
1293 * Returns: NULL or the located fcoe_softc
1294 */
1295static struct fcoe_softc *fcoe_hostlist_lookup_softc(
1296 const struct net_device *dev)
1297{
1298 struct fcoe_softc *fc;
1299
1300 read_lock(&fcoe_hostlist_lock);
1301 list_for_each_entry(fc, &fcoe_hostlist, list) {
1302 if (fc->real_dev == dev) {
1303 read_unlock(&fcoe_hostlist_lock);
1304 return fc;
1305 }
1306 }
1307 read_unlock(&fcoe_hostlist_lock);
1308 return NULL;
1309}
1310
1311/*
1312 * fcoe_hostlist_lookup - find the corresponding lport by netdev
1313 * @netdev: ptr to net_device
1314 *
1315 * Returns: the matching fc_lport, or NULL if none is found
1316 */
1317struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1318{
1319 struct fcoe_softc *fc;
1320
1321 fc = fcoe_hostlist_lookup_softc(netdev);
1322
1323 return (fc) ? fc->lp : NULL;
1324}
1325EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
1326
1327/*
1328 * fcoe_hostlist_add - add a lport to lports list
1329 * @lp: ptr to the fc_lport to be added
1330 *
1331 * Returns: 0 for success
1332 */
1333int fcoe_hostlist_add(const struct fc_lport *lp)
1334{
1335 struct fcoe_softc *fc;
1336
1337 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1338 if (!fc) {
1339 fc = fcoe_softc(lp);
1340 write_lock_bh(&fcoe_hostlist_lock);
1341 list_add_tail(&fc->list, &fcoe_hostlist);
1342 write_unlock_bh(&fcoe_hostlist_lock);
1343 }
1344 return 0;
1345}
1346EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1347
1348/*
1349 * fcoe_hostlist_remove - remove a lport from lports list
1350 * @lp: ptr to the fc_lport to be removed
1351 *
1352 * Returns: 0 for success
1353 */
1354int fcoe_hostlist_remove(const struct fc_lport *lp)
1355{
1356 struct fcoe_softc *fc;
1357
1358 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1359 BUG_ON(!fc);
1360 write_lock_bh(&fcoe_hostlist_lock);
1361 list_del(&fc->list);
1362 write_unlock_bh(&fcoe_hostlist_lock);
1363
1364 return 0;
1365}
1366EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
1367
1368/**
1369 * fcoe_libfc_config - sets up libfc related properties for lport
1370 * @lp: ptr to the fc_lport
1371 * @tt: libfc function template
1372 *
1373 * Returns: 0 for success
1374 **/
1375int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
1376{
1377 /* Set the function pointers set by the LLDD */
1378 memcpy(&lp->tt, tt, sizeof(*tt));
1379 if (fc_fcp_init(lp))
1380 return -ENOMEM;
1381 fc_exch_init(lp);
1382 fc_elsct_init(lp);
1383 fc_lport_init(lp);
1384 fc_rport_init(lp);
1385 fc_disc_init(lp);
1386
1387 return 0;
1388}
1389EXPORT_SYMBOL_GPL(fcoe_libfc_config);
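/*
 * Illustrative sketch (hypothetical transport, not part of the original
 * source): an LLD fills in only the hooks it implements, e.g. its frame
 * transmit routine, and fcoe_libfc_config() lets the fc_*_init() calls
 * above supply libfc defaults for the rest.
 *
 *	static struct libfc_function_template example_libfc_templ = {
 *		.frame_send = example_xmit,
 *	};
 *
 *	rc = fcoe_libfc_config(lp, &example_libfc_templ);
 */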
1390
1391/**
1392 * fcoe_init - fcoe module loading initialization
1393 *
1394 * Initialization routine
1395 * 1. Will create the fc transport software structure
1396 * 2. initialize the linked list of port information structures
1397 *
1398 * Returns 0 on success, negative on failure
1399 **/
1400static int __init fcoe_init(void)
1401{
1402 int cpu;
1403 struct fcoe_percpu_s *p;
1404
1405
1406 INIT_LIST_HEAD(&fcoe_hostlist);
1407 rwlock_init(&fcoe_hostlist_lock);
1408
1409#ifdef CONFIG_HOTPLUG_CPU
1410 register_cpu_notifier(&fcoe_cpu_notifier);
1411#endif /* CONFIG_HOTPLUG_CPU */
1412
1413 /*
1414 * initialize per CPU interrupt thread
1415 */
1416 for_each_online_cpu(cpu) {
1417 p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
1418 if (p) {
1419 p->thread = kthread_create(fcoe_percpu_receive_thread,
1420 (void *)p,
1421 "fcoethread/%d", cpu);
1422
1423 /*
1424 * if there is no error then bind the thread to the cpu
1425 * initialize the semaphore and skb queue head
1426 */
1427 if (likely(!IS_ERR(p->thread))) {
1428 p->cpu = cpu;
1429 fcoe_percpu[cpu] = p;
1430 skb_queue_head_init(&p->fcoe_rx_list);
1431 kthread_bind(p->thread, cpu);
1432 wake_up_process(p->thread);
1433 } else {
1434 fcoe_percpu[cpu] = NULL;
1435 kfree(p);
1436
1437 }
1438 }
1439 }
1440
1441 /*
1442 * setup link change notification
1443 */
1444 fcoe_dev_setup();
1445
1446 init_timer(&fcoe_timer);
1447 fcoe_timer.data = 0;
1448 fcoe_timer.function = fcoe_watchdog;
1449 fcoe_timer.expires = (jiffies + (10 * HZ));
1450 add_timer(&fcoe_timer);
1451
1452 /* initialize the fcoe transport */
1453 fcoe_transport_init();
1454
1455 fcoe_sw_init();
1456
1457 return 0;
1458}
1459module_init(fcoe_init);
1460
1461/**
1462 * fcoe_exit - fcoe module unloading cleanup
1463 *
1464 * Returns: none
1465 **/
1466static void __exit fcoe_exit(void)
1467{
1468 u32 idx;
1469 struct fcoe_softc *fc, *tmp;
1470 struct fcoe_percpu_s *p;
1471 struct sk_buff *skb;
1472
1473 /*
1474 * Stop all call back interfaces
1475 */
1476#ifdef CONFIG_HOTPLUG_CPU
1477 unregister_cpu_notifier(&fcoe_cpu_notifier);
1478#endif /* CONFIG_HOTPLUG_CPU */
1479 fcoe_dev_cleanup();
1480
1481 /*
1482 * stop timer
1483 */
1484 del_timer_sync(&fcoe_timer);
1485
1486 /* release the associated fcoe transport for each lport */
1487 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1488 fcoe_transport_release(fc->real_dev);
1489
1490 for (idx = 0; idx < NR_CPUS; idx++) {
1491 if (fcoe_percpu[idx]) {
1492 kthread_stop(fcoe_percpu[idx]->thread);
1493 p = fcoe_percpu[idx];
1494 spin_lock_bh(&p->fcoe_rx_list.lock);
1495 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1496 kfree_skb(skb);
1497 spin_unlock_bh(&p->fcoe_rx_list.lock);
1498 if (fcoe_percpu[idx]->crc_eof_page)
1499 put_page(fcoe_percpu[idx]->crc_eof_page);
1500 kfree(fcoe_percpu[idx]);
1501 }
1502 }
1503
1504 /* remove the sw transport */
1505 fcoe_sw_exit();
1506
1507 /* detach the transport */
1508 fcoe_transport_exit();
1509}
1510module_exit(fcoe_exit);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 56f4e6bffc21..32eef66114c7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -3,7 +3,7 @@
  * Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
  * Author: Rickard E. Faith, faith@cs.unc.edu
  * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
- * Shared IRQ supported added 7/7/2001 Alan Cox <alan@redhat.com>
+ * Shared IRQ supported added 7/7/2001 Alan Cox <alan@lxorguk.ukuu.org.uk>
 
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 4d15a62914e9..9c1e6a5b5af0 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -10,7 +10,7 @@
    See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
    updates, info and ADF-files for adapters supported by this driver.
 
-   Alan Cox <alan@redhat.com>
+   Alan Cox <alan@lxorguk.ukuu.org.uk>
    Updated for Linux 2.5.45 to use the new error handler, cleaned up the
    lock macros and did a few unavoidable locking tweaks, plus one locking
    fix in the irq and completion path.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7650707a40de..44f202f33101 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -121,6 +121,7 @@ static const struct {
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
 
 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
@@ -278,13 +279,6 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
 				    rsp->data.info.rsp_code))
 			return DID_ERROR << 16;
 
-	if (!vfc_cmd->status) {
-		if (rsp->flags & FCP_RESID_OVER)
-			return rsp->scsi_status | (DID_ERROR << 16);
-		else
-			return rsp->scsi_status | (DID_OK << 16);
-	}
-
 	err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
 	if (err >= 0)
 		return rsp->scsi_status | (cmd_status[err].result << 16);
@@ -503,6 +497,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 	case IBMVFC_HOST_ACTION_INIT:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_NONE:
 	default:
@@ -566,7 +561,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
 	struct ibmvfc_target *tgt;
 
 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
-		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
 			dev_err(vhost->dev,
 				"Host initialization retries exceeded. Taking adapter offline\n");
 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
@@ -765,6 +760,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
+	if (evt->eh_comp)
+		complete(evt->eh_comp);
+
 	ibmvfc_free_event(evt);
 }
 
@@ -847,11 +845,12 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
 static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 {
 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
-		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+		vhost->delay_init = 1;
+		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
 			dev_err(vhost->dev,
 				"Host initialization retries exceeded. Taking adapter offline\n");
 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
-		} else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
 			__ibmvfc_reset_host(vhost);
 		else
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -1252,6 +1251,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
 	evt->sync_iu = NULL;
 	evt->crq.format = format;
 	evt->done = done;
+	evt->eh_comp = NULL;
 }
 
 /**
@@ -1381,6 +1381,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 		add_timer(&evt->timer);
 	}
 
+	mb();
+
 	if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
 		list_del(&evt->queue);
 		del_timer(&evt->timer);
@@ -1477,6 +1479,11 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 			sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
 		if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
 			memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+		if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
+			ibmvfc_reinit_host(evt->vhost);
+
+		if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
+			cmnd->result = (DID_ERROR << 16);
 
 		ibmvfc_log_error(evt);
 	}
@@ -1489,6 +1496,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
+	if (evt->eh_comp)
+		complete(evt->eh_comp);
+
 	ibmvfc_free_event(evt);
 }
 
@@ -1627,7 +1637,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 	struct ibmvfc_cmd *tmf;
-	struct ibmvfc_event *evt;
+	struct ibmvfc_event *evt = NULL;
 	union ibmvfc_iu rsp_iu;
 	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
 	int rsp_rc = -EBUSY;
@@ -1789,7 +1799,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 {
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
-	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct scsi_target *starget = scsi_target(sdev);
+	struct fc_rport *rport = starget_to_rport(starget);
 	struct ibmvfc_tmf *tmf;
 	struct ibmvfc_event *evt, *found_evt;
 	union ibmvfc_iu rsp;
@@ -1827,7 +1838,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 	int_to_scsilun(sdev->lun, &tmf->lun);
 	tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
 	tmf->cancel_key = (unsigned long)sdev->hostdata;
-	tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+	tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
 	evt->sync_iu = &rsp;
 	init_completion(&evt->comp);
@@ -1859,6 +1870,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 }
 
 /**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt:	ibmvfc event struct
+ * @device:	device to match (starget)
+ *
+ * Returns:
+ *	1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+		return 1;
+	return 0;
+}
+
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt:	ibmvfc event struct
+ * @device:	device to match (sdev)
+ *
+ * Returns:
+ *	1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+	if (evt->cmnd && evt->cmnd->device == device)
+		return 1;
+	return 0;
+}
+
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost:	ibmvfc host struct
+ * @device:	device to match (starget or sdev)
+ * @match:	match function
+ *
+ * Returns:
+ *	SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+			       int (*match) (struct ibmvfc_event *, void *))
+{
+	struct ibmvfc_event *evt;
+	DECLARE_COMPLETION_ONSTACK(comp);
+	int wait;
+	unsigned long flags;
+	signed long timeout = init_timeout * HZ;
+
+	ENTER;
+	do {
+		wait = 0;
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		list_for_each_entry(evt, &vhost->sent, queue) {
+			if (match(evt, device)) {
+				evt->eh_comp = &comp;
+				wait++;
+			}
+		}
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+		if (wait) {
+			timeout = wait_for_completion_timeout(&comp, timeout);
+
+			if (!timeout) {
+				wait = 0;
+				spin_lock_irqsave(vhost->host->host_lock, flags);
+				list_for_each_entry(evt, &vhost->sent, queue) {
+					if (match(evt, device)) {
+						evt->eh_comp = NULL;
+						wait++;
+					}
+				}
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				if (wait)
+					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+				LEAVE;
+				return wait ? FAILED : SUCCESS;
+			}
+		}
+	} while (wait);
+
+	LEAVE;
+	return SUCCESS;
+}
+
+/**
  * ibmvfc_eh_abort_handler - Abort a command
  * @cmd: scsi command to abort
  *
@@ -1867,29 +1963,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	int cancel_rc, abort_rc;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
-	abort_rc = ibmvfc_abort_task_set(cmd->device);
+	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+	abort_rc = ibmvfc_abort_task_set(sdev);
 
-	if (!cancel_rc && !abort_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && evt->cmnd->device == cmd->device)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !abort_rc)
+		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -1901,29 +1989,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	int cancel_rc, reset_rc;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
-	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
 
-	if (!cancel_rc && !reset_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && evt->cmnd->device == cmd->device)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !reset_rc)
+		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -1959,31 +2039,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
 **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
-	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
-	struct scsi_target *starget = scsi_target(cmd->device);
-	struct ibmvfc_event *evt, *pos;
+	struct scsi_device *sdev = cmd->device;
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct scsi_target *starget = scsi_target(sdev);
 	int reset_rc;
+	int rc = FAILED;
 	unsigned long cancel_rc = 0;
-	unsigned long flags;
 
 	ENTER;
 	ibmvfc_wait_while_resetting(vhost);
 	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
-	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
 
-	if (!cancel_rc && !reset_rc) {
-		spin_lock_irqsave(vhost->host->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
-		LEAVE;
-		return SUCCESS;
-	}
+	if (!cancel_rc && !reset_rc)
+		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
 	LEAVE;
-	return FAILED;
+	return rc;
 }
 
 /**
@@ -2013,23 +2085,18 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 	struct scsi_target *starget = to_scsi_target(&rport->dev);
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	struct ibmvfc_host *vhost = shost_priv(shost);
-	struct ibmvfc_event *evt, *pos;
 	unsigned long cancel_rc = 0;
 	unsigned long abort_rc = 0;
-	unsigned long flags;
+	int rc = FAILED;
 
 	ENTER;
 	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
 	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
 
-	if (!cancel_rc && !abort_rc) {
-		spin_lock_irqsave(shost->host_lock, flags);
-		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
-			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
-				ibmvfc_fail_request(evt, DID_ABORT);
-		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
-	} else
+	if (!cancel_rc && !abort_rc)
+		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
+
+	if (rc == FAILED)
 		ibmvfc_issue_fc_host_lip(shost);
 	LEAVE;
 }
@@ -2089,15 +2156,17 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 	case IBMVFC_AE_LINK_UP:
 	case IBMVFC_AE_RESUME:
 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
-		ibmvfc_init_host(vhost, 1);
+		vhost->delay_init = 1;
+		__ibmvfc_reset_host(vhost);
 		break;
 	case IBMVFC_AE_SCN_FABRIC:
+	case IBMVFC_AE_SCN_DOMAIN:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
-		ibmvfc_init_host(vhost, 1);
+		vhost->delay_init = 1;
+		__ibmvfc_reset_host(vhost);
 		break;
 	case IBMVFC_AE_SCN_NPORT:
 	case IBMVFC_AE_SCN_GROUP:
-	case IBMVFC_AE_SCN_DOMAIN:
 		vhost->events_to_log |= IBMVFC_AE_RSCN;
 	case IBMVFC_AE_ELS_LOGO:
 	case IBMVFC_AE_ELS_PRLO:
@@ -2263,6 +2332,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
+ * ibmvfc_target_alloc - Setup the target's task set value
+ * @starget:	struct scsi_target
+ *
+ * Set the target's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ *	0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_target_alloc(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return 0;
+}
+
+/**
  * ibmvfc_slave_configure - Configure the device
  * @sdev: struct scsi_device device to configure
 *
@@ -2541,6 +2632,7 @@ static struct scsi_host_template driver_template = {
 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
 	.slave_alloc = ibmvfc_slave_alloc,
 	.slave_configure = ibmvfc_slave_configure,
+	.target_alloc = ibmvfc_target_alloc,
 	.scan_finished = ibmvfc_scan_finished,
 	.change_queue_depth = ibmvfc_change_queue_depth,
 	.change_queue_type = ibmvfc_change_queue_type,
@@ -2637,7 +2729,7 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
 		} else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
 			vio_disable_interrupts(vdev);
 			ibmvfc_handle_async(async, vhost);
-			crq->valid = 0;
+			async->valid = 0;
 		} else
 			done = 1;
 	}
@@ -2669,7 +2761,7 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
 static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
 				  void (*job_step) (struct ibmvfc_target *))
 {
-	if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		wake_up(&tgt->vhost->work_wait_q);
 	} else
@@ -2708,6 +2800,8 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 			rsp->status, rsp->error, status);
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -2802,6 +2896,8 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 
 		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -3093,6 +3189,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
 			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+		else
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 		break;
 	};
 
@@ -3423,6 +3521,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 	case IBMVFC_HOST_ACTION_QUERY:
 	default:
 		break;
@@ -3519,7 +3618,13 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		break;
 	case IBMVFC_HOST_ACTION_INIT:
 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
-		vhost->job_step(vhost);
+		if (vhost->delay_init) {
+			vhost->delay_init = 0;
+			spin_unlock_irqrestore(vhost->host->host_lock, flags);
+			ssleep(15);
+			return;
+		} else
+			vhost->job_step(vhost);
 		break;
 	case IBMVFC_HOST_ACTION_QUERY:
 		list_for_each_entry(tgt, &vhost->targets, queue)
@@ -3538,6 +3643,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
 		break;
 	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
 		list_for_each_entry(tgt, &vhost->targets, queue) {
 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
 				tgt_dbg(tgt, "Deleting rport\n");
@@ -3553,8 +3659,17 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		}
 
 		if (vhost->state == IBMVFC_INITIALIZING) {
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
-			vhost->job_step = ibmvfc_discover_targets;
+			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
+				ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+				vhost->init_retries = 0;
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				scsi_unblock_requests(vhost->host);
+				return;
+			} else {
+				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+				vhost->job_step = ibmvfc_discover_targets;
+			}
 		} else {
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -3577,14 +3692,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 			}
 		}
 
-		if (!ibmvfc_dev_init_to_do(vhost)) {
-			ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
-			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
-			vhost->init_retries = 0;
-			spin_unlock_irqrestore(vhost->host->host_lock, flags);
-			scsi_unblock_requests(vhost->host);
-			return;
-		}
+		if (!ibmvfc_dev_init_to_do(vhost))
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
 		break;
 	case IBMVFC_HOST_ACTION_TGT_ADD:
 		list_for_each_entry(tgt, &vhost->targets, queue) {
@@ -3592,16 +3701,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
 				ibmvfc_tgt_add_rport(tgt);
 				return;
-			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
-				tgt_dbg(tgt, "Deleting rport\n");
-				rport = tgt->rport;
-				tgt->rport = NULL;
-				list_del(&tgt->queue);
-				spin_unlock_irqrestore(vhost->host->host_lock, flags);
-				if (rport)
-					fc_remote_port_delete(rport);
-				kref_put(&tgt->kref, ibmvfc_release_tgt);
-				return;
 			}
 		}
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index fb3177ab6691..babdf3db59df 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,11 +29,11 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.2"
-#define IBMVFC_DRIVER_DATE "(August 14, 2008)"
+#define IBMVFC_DRIVER_VERSION "1.0.4"
+#define IBMVFC_DRIVER_DATE "(November 14, 2008)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 15
-#define IBMVFC_INIT_TIMEOUT 30
+#define IBMVFC_INIT_TIMEOUT 120
 #define IBMVFC_MAX_REQUESTS_DEFAULT 100
 
 #define IBMVFC_DEBUG 0
@@ -43,7 +43,8 @@
 #define IBMVFC_MAX_DISC_THREADS 4
 #define IBMVFC_TGT_MEMPOOL_SZ 64
 #define IBMVFC_MAX_CMDS_PER_LUN 64
-#define IBMVFC_MAX_INIT_RETRIES 3
+#define IBMVFC_MAX_HOST_INIT_RETRIES 6
+#define IBMVFC_MAX_TGT_INIT_RETRIES 3
 #define IBMVFC_DEV_LOSS_TMO (5 * 60)
 #define IBMVFC_DEFAULT_LOG_LEVEL 2
 #define IBMVFC_MAX_CDB_LEN 16
@@ -109,6 +110,7 @@ enum ibmvfc_vios_errors {
 	IBMVFC_TRANS_CANCELLED = 0x0006,
 	IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
 	IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
+	IBMVFC_PLOGI_REQUIRED = 0x0010,
 	IBMVFC_COMMAND_FAILED = 0x8000,
 };
 
@@ -337,7 +339,6 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUA_VALID 0x40
 	u32 cancel_key;
 	u32 my_cancel_key;
-#define IBMVFC_TMF_CANCEL_KEY 0x80000000
 	u32 pad;
 	u64 reserved[2];
 }__attribute__((packed, aligned (8)));
@@ -524,10 +525,10 @@ enum ibmvfc_async_event {
 };
 
 struct ibmvfc_crq {
-	u8 valid;
-	u8 format;
+	volatile u8 valid;
+	volatile u8 format;
 	u8 reserved[6];
-	u64 ioba;
+	volatile u64 ioba;
 }__attribute__((packed, aligned (8)));
 
 struct ibmvfc_crq_queue {
@@ -537,13 +538,13 @@ struct ibmvfc_crq_queue {
 };
 
 struct ibmvfc_async_crq {
-	u8 valid;
+	volatile u8 valid;
 	u8 pad[3];
 	u32 pad2;
-	u64 event;
-	u64 scsi_id;
-	u64 wwpn;
-	u64 node_name;
+	volatile u64 event;
+	volatile u64 scsi_id;
+	volatile u64 wwpn;
+	volatile u64 node_name;
 	u64 reserved;
 }__attribute__((packed, aligned (8)));
 
@@ -606,6 +607,7 @@ struct ibmvfc_event {
 	struct srp_direct_buf *ext_list;
 	dma_addr_t ext_list_token;
 	struct completion comp;
+	struct completion *eh_comp;
 	struct timer_list timer;
 };
 
@@ -626,6 +628,7 @@ enum ibmvfc_host_action {
 	IBMVFC_HOST_ACTION_TGT_DEL,
 	IBMVFC_HOST_ACTION_ALLOC_TGTS,
 	IBMVFC_HOST_ACTION_TGT_INIT,
+	IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
 	IBMVFC_HOST_ACTION_TGT_ADD,
 };
 
@@ -671,6 +674,7 @@ struct ibmvfc_host {
 	int discovery_threads;
 	int client_migrated;
 	int reinit;
+	int delay_init;
 	int events_to_log;
 #define IBMVFC_AE_LINKUP 0x0001
 #define IBMVFC_AE_LINKDOWN 0x0002
@@ -700,7 +704,7 @@ struct ibmvfc_host {
 
 #define ibmvfc_log(vhost, level, ...) \
 	do { \
-		if (level >= (vhost)->log_level) \
+		if ((vhost)->log_level >= level) \
 			dev_err((vhost)->dev, ##__VA_ARGS__); \
 	} while (0)
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 6cad1758243a..868d35ea01bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -107,7 +107,7 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channel, "Largest channel value");
 module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
-module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
+module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
 
 /* ------------------------------------------------------------
@@ -1657,7 +1657,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 	vdev->dev.driver_data = NULL;
 
-	driver_template.can_queue = max_requests;
+	driver_template.can_queue = max_requests - 2;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
 		dev_err(&vdev->dev, "couldn't allocate host data\n");
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 8053b1e86ccb..52bdc6df6b92 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -107,7 +107,7 @@
  * this thing into as good a shape as possible, and I'm positive
  * there are lots of lurking bugs and "Stupid Places".
  *
- * Updated for Linux 2.5 by Alan Cox <alan@redhat.com>
+ * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
  *  - Using new_eh handler
  *  - Hopefully got all the locking right again
  * See "FIXME" notes for items that could do with more work
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e3f739776bad..5529518ff2fa 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -4,7 +4,7 @@
  * Copyright (c) 1994-1998 Initio Corporation
  * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
  * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
- * Copyright (c) 2007 Red Hat <alan@redhat.com>
+ * Copyright (c) 2007 Red Hat
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index cb48efa81fe2..e58af9e95506 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,7 +4,7 @@
  * Copyright (c) 1994-1998 Initio Corporation
  * All rights reserved.
  *
- * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ * Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index ded854a6dd35..0edfb1fa63ce 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5389,9 +5389,9 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 
-	spin_unlock_irq(ioa_cfg->host->host_lock);
+	spin_unlock(ioa_cfg->host->host_lock);
 	scsi_unblock_requests(ioa_cfg->host);
-	spin_lock_irq(ioa_cfg->host->host_lock);
+	spin_lock(ioa_cfg->host->host_lock);
 
 	if (!ioa_cfg->allow_cmds)
 		scsi_block_requests(ioa_cfg->host);
@@ -7473,7 +7473,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto out_scsi_host_put;
 	}
 
-	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
+	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4871dd1f2582..59459141b437 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -19,7 +19,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * Alan Cox <alan@redhat.com> - Removed several careless u32/dma_addr_t errors
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
  *				that broke 64bit platforms.
  */
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e11bce6ab63c..23808dfe22ba 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -27,7 +27,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/list.h>
 #include <linux/inet.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
@@ -44,12 +43,12 @@
 
 #include "iscsi_tcp.h"
 
-MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
 	      "Alex Aizman <itn780@yahoo.com>");
 MODULE_DESCRIPTION("iSCSI/TCP data-path");
 MODULE_LICENSE("GPL");
 #undef DEBUG_TCP
-#define DEBUG_ASSERT
 
 #ifdef DEBUG_TCP
 #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
@@ -57,934 +56,41 @@ MODULE_LICENSE("GPL");
57#define debug_tcp(fmt...) 56#define debug_tcp(fmt...)
58#endif 57#endif
59 58
60#ifndef DEBUG_ASSERT 59static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
61#ifdef BUG_ON 60static struct scsi_host_template iscsi_sw_tcp_sht;
62#undef BUG_ON 61static struct iscsi_transport iscsi_sw_tcp_transport;
63#endif
64#define BUG_ON(expr)
65#endif
66
67static struct scsi_transport_template *iscsi_tcp_scsi_transport;
68static struct scsi_host_template iscsi_sht;
69static struct iscsi_transport iscsi_tcp_transport;
70 62
71static unsigned int iscsi_max_lun = 512; 63static unsigned int iscsi_max_lun = 512;
72module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 64module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
73 65
74static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
75 struct iscsi_segment *segment);
76
77/*
78 * Scatterlist handling: inside the iscsi_segment, we
79 * remember an index into the scatterlist, and set data/size
80 * to the current scatterlist entry. For highmem pages, we
81 * kmap as needed.
82 *
83 * Note that the page is unmapped when we return from
84 * TCP's data_ready handler, so we may end up mapping and
85 * unmapping the same page repeatedly. The whole reason
86 * for this is that we shouldn't keep the page mapped
87 * outside the softirq.
88 */
89
90/**
91 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
92 * @segment: the buffer object
93 * @sg: scatterlist
94 * @offset: byte offset into that sg entry
95 *
96 * This function sets up the segment so that subsequent
97 * data is copied to the indicated sg entry, at the given
98 * offset.
99 */
100static inline void
101iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
102 struct scatterlist *sg, unsigned int offset)
103{
104 segment->sg = sg;
105 segment->sg_offset = offset;
106 segment->size = min(sg->length - offset,
107 segment->total_size - segment->total_copied);
108 segment->data = NULL;
109}
110
111/**
112 * iscsi_tcp_segment_map - map the current S/G page
113 * @segment: iscsi_segment
114 * @recv: 1 if called from recv path
115 *
116 * We only need to possibly kmap data if scatter lists are being used,
117 * because the iscsi passthrough and internal IO paths will never use high
118 * mem pages.
119 */
120static inline void
121iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
122{
123 struct scatterlist *sg;
124
125 if (segment->data != NULL || !segment->sg)
126 return;
127
128 sg = segment->sg;
129 BUG_ON(segment->sg_mapped);
130 BUG_ON(sg->length == 0);
131
132 /*
133 * If the page count is greater than one it is ok to send
134 * to the network layer's zero copy send path. If not we
135 * have to go the slow sendmsg path. We always map for the
136 * recv path.
137 */
138 if (page_count(sg_page(sg)) >= 1 && !recv)
139 return;
140
141 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
142 segment);
143 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
144 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
145}
146
147static inline void
148iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
149{
150 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
151
152 if (segment->sg_mapped) {
153 debug_tcp("iscsi_tcp_segment_unmap valid\n");
154 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
155 segment->sg_mapped = NULL;
156 segment->data = NULL;
157 }
158}
159
160/*
161 * Splice the digest buffer into the buffer
162 */
163static inline void
164iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
165{
166 segment->data = digest;
167 segment->digest_len = ISCSI_DIGEST_SIZE;
168 segment->total_size += ISCSI_DIGEST_SIZE;
169 segment->size = ISCSI_DIGEST_SIZE;
170 segment->copied = 0;
171 segment->sg = NULL;
172 segment->hash = NULL;
173}
174
175/**
176 * iscsi_tcp_segment_done - check whether the segment is complete
177 * @segment: iscsi segment to check
178 * @recv: set to one of this is called from the recv path
179 * @copied: number of bytes copied
180 *
181 * Check if we're done receiving this segment. If the receive
182 * buffer is full but we expect more data, move on to the
183 * next entry in the scatterlist.
184 *
185 * If the amount of data we received isn't a multiple of 4,
186 * we will transparently receive the pad bytes, too.
187 *
188 * This function must be re-entrant.
189 */
190static inline int
191iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
192{
193 static unsigned char padbuf[ISCSI_PAD_LEN];
194 struct scatterlist sg;
195 unsigned int pad;
196
197 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
198 segment->size, recv ? "recv" : "xmit");
199 if (segment->hash && copied) {
200 /*
201 * If a segment is kmapd we must unmap it before sending
202 * to the crypto layer since that will try to kmap it again.
203 */
204 iscsi_tcp_segment_unmap(segment);
205
206 if (!segment->data) {
207 sg_init_table(&sg, 1);
208 sg_set_page(&sg, sg_page(segment->sg), copied,
209 segment->copied + segment->sg_offset +
210 segment->sg->offset);
211 } else
212 sg_init_one(&sg, segment->data + segment->copied,
213 copied);
214 crypto_hash_update(segment->hash, &sg, copied);
215 }
216
217 segment->copied += copied;
218 if (segment->copied < segment->size) {
219 iscsi_tcp_segment_map(segment, recv);
220 return 0;
221 }
222
223 segment->total_copied += segment->copied;
224 segment->copied = 0;
225 segment->size = 0;
226
227 /* Unmap the current scatterlist page, if there is one. */
228 iscsi_tcp_segment_unmap(segment);
229
230 /* Do we have more scatterlist entries? */
231 debug_tcp("total copied %u total size %u\n", segment->total_copied,
232 segment->total_size);
233 if (segment->total_copied < segment->total_size) {
234 /* Proceed to the next entry in the scatterlist. */
235 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
236 0);
237 iscsi_tcp_segment_map(segment, recv);
238 BUG_ON(segment->size == 0);
239 return 0;
240 }
241
242 /* Do we need to handle padding? */
243 pad = iscsi_padding(segment->total_copied);
244 if (pad != 0) {
245 debug_tcp("consume %d pad bytes\n", pad);
246 segment->total_size += pad;
247 segment->size = pad;
248 segment->data = padbuf;
249 return 0;
250 }
251
252 /*
253 * Set us up for transferring the data digest. hdr digest
254 * is completely handled in hdr done function.
255 */
256 if (segment->hash) {
257 crypto_hash_final(segment->hash, segment->digest);
258 iscsi_tcp_segment_splice_digest(segment,
259 recv ? segment->recv_digest : segment->digest);
260 return 0;
261 }
262
263 return 1;
264}
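
/*
 * A runnable userspace sketch (not driver code) of the pad handling
 * above, assuming iscsi_padding() from iscsi_proto.h rounds a length
 * up to the next multiple of ISCSI_PAD_LEN (4) and returns the
 * difference:
 */
#include <stdio.h>

#define ISCSI_PAD_LEN 4

static unsigned int iscsi_padding(unsigned int len)
{
	len &= ISCSI_PAD_LEN - 1;
	return len ? ISCSI_PAD_LEN - len : 0;
}

int main(void)
{
	/* a 509-byte data segment is followed by 3 pad bytes on the wire */
	unsigned int len;

	for (len = 508; len <= 512; len++)
		printf("payload %u -> %u pad byte(s)\n", len, iscsi_padding(len));
	return 0;
}
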
265
266/**
267 * iscsi_tcp_xmit_segment - transmit segment
268 * @tcp_conn: the iSCSI TCP connection
269 * @segment: the buffer to transmit
270 *
271 * This function transmits as much of the buffer as
272 * the network layer will accept, and returns the number of
273 * bytes transmitted.
274 *
275 * If CRC hashing is enabled, the function will compute the
276 * hash as it goes. When the entire segment has been transmitted,
277 * it will retrieve the hash value and send it as well.
278 */
279static int
280iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
281 struct iscsi_segment *segment)
282{
283 struct socket *sk = tcp_conn->sock;
284 unsigned int copied = 0;
285 int r = 0;
286
287 while (!iscsi_tcp_segment_done(segment, 0, r)) {
288 struct scatterlist *sg;
289 unsigned int offset, copy;
290 int flags = 0;
291
292 r = 0;
293 offset = segment->copied;
294 copy = segment->size - offset;
295
296 if (segment->total_copied + segment->size < segment->total_size)
297 flags |= MSG_MORE;
298
299 /* Use sendpage if we can; else fall back to sendmsg */
300 if (!segment->data) {
301 sg = segment->sg;
302 offset += segment->sg_offset + sg->offset;
303 r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
304 flags);
305 } else {
306 struct msghdr msg = { .msg_flags = flags };
307 struct kvec iov = {
308 .iov_base = segment->data + offset,
309 .iov_len = copy
310 };
311
312 r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
313 }
314
315 if (r < 0) {
316 iscsi_tcp_segment_unmap(segment);
317 if (copied || r == -EAGAIN)
318 break;
319 return r;
320 }
321 copied += r;
322 }
323 return copied;
324}
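
/*
 * A userspace sketch (not driver code) of the sendmsg() fallback path
 * above: linear data goes out through an iovec, and MSG_MORE is set
 * while more of the PDU is still queued so TCP can coalesce header,
 * payload, pad and digest into fewer frames. The chunk sizes in
 * main() are made up for illustration.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t xmit_linear(int sk, const void *data, size_t size,
			   size_t sent_so_far, size_t pdu_total)
{
	struct iovec iov = { .iov_base = (void *)data, .iov_len = size };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	int flags = 0;

	/* more PDU bytes follow this chunk: let the stack batch them */
	if (sent_so_far + size < pdu_total)
		flags |= MSG_MORE;
	return sendmsg(sk, &msg, flags);
}

int main(void)
{
	int sv[2];
	char buf[64];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	xmit_linear(sv[0], "iscsi-hdr", 9, 0, 13);	/* MSG_MORE set */
	xmit_linear(sv[0], "dgst", 4, 9, 13);		/* final chunk */
	printf("peer read %zd bytes\n", read(sv[1], buf, sizeof(buf)));
	return 0;
}
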
325
326/**
327 * iscsi_tcp_segment_recv - copy data to segment
328 * @tcp_conn: the iSCSI TCP connection
329 * @segment: the buffer to copy to
330 * @ptr: data pointer
331 * @len: amount of data available
332 *
333 * This function copies up to @len bytes to the
334 * given buffer, and returns the number of bytes
335 * consumed, which can actually be less than @len.
336 *
337 * If hash digest is enabled, the function will update the
338 * hash while copying.
339 * Combining these two operations doesn't buy us a lot (yet),
340 * but in the future we could implement combined copy+crc,
341 * just the way we do for network layer checksums.
342 */
343static int
344iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
345 struct iscsi_segment *segment, const void *ptr,
346 unsigned int len)
347{
348 unsigned int copy = 0, copied = 0;
349
350 while (!iscsi_tcp_segment_done(segment, 1, copy)) {
351 if (copied == len) {
352 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
353 len);
354 break;
355 }
356
357 copy = min(len - copied, segment->size - segment->copied);
358 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
359 memcpy(segment->data + segment->copied, ptr + copied, copy);
360 copied += copy;
361 }
362 return copied;
363}
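
/*
 * A userspace sketch (not driver code) of the receive loop above: the
 * network hands us arbitrary-sized chunks, and each one fills the
 * current segment in min(chunk-remaining, segment-remaining) pieces;
 * a short return tells the caller to re-offer the leftover bytes to
 * the next segment.
 */
#include <stdio.h>
#include <string.h>

struct seg { char buf[8]; size_t size, copied; };

static size_t seg_recv(struct seg *s, const char *ptr, size_t len)
{
	size_t copied = 0;

	while (s->copied < s->size && copied < len) {
		size_t copy = len - copied < s->size - s->copied ?
			      len - copied : s->size - s->copied;

		memcpy(s->buf + s->copied, ptr + copied, copy);
		s->copied += copy;
		copied += copy;
	}
	return copied;
}

int main(void)
{
	struct seg s = { .size = 8 };

	/* two chunks of 5 and 6 bytes fill one 8-byte segment */
	printf("consumed %zu\n", seg_recv(&s, "iscsi", 5));	/* 5 */
	printf("consumed %zu\n", seg_recv(&s, " data!", 6));	/* 3 */
	return 0;
}
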
364
365static inline void
366iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
367 unsigned char digest[ISCSI_DIGEST_SIZE])
368{
369 struct scatterlist sg;
370
371 sg_init_one(&sg, hdr, hdrlen);
372 crypto_hash_digest(hash, &sg, hdrlen, digest);
373}
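
/*
 * The hash_desc used here is a crc32c transform (allocated in the
 * connection-create path further down). A bitwise CRC32C (Castagnoli)
 * sketch, illustrative only -- the driver itself goes through the
 * kernel crypto API rather than open-coding this:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = 0xFFFFFFFF;

	while (len--) {
		int k;

		crc ^= *p++;
		for (k = 0; k < 8; k++)		/* reflected poly 0x82F63B78 */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	/* standard CRC-32C check value: 0xE3069283 */
	printf("0x%08X\n", crc32c("123456789", 9));
	return 0;
}
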
374
375static inline int
376iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
377 struct iscsi_segment *segment)
378{
379 if (!segment->digest_len)
380 return 1;
381
382 if (memcmp(segment->recv_digest, segment->digest,
383 segment->digest_len)) {
384 debug_scsi("digest mismatch\n");
385 return 0;
386 }
387
388 return 1;
389}
390
391/*
392 * Helper function to set up segment buffer
393 */
394static inline void
395__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
396 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
397{
398 memset(segment, 0, sizeof(*segment));
399 segment->total_size = size;
400 segment->done = done;
401
402 if (hash) {
403 segment->hash = hash;
404 crypto_hash_init(hash);
405 }
406}
407
408static inline void
409iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
410 size_t size, iscsi_segment_done_fn_t *done,
411 struct hash_desc *hash)
412{
413 __iscsi_segment_init(segment, size, done, hash);
414 segment->data = data;
415 segment->size = size;
416}
417
418static inline int
419iscsi_segment_seek_sg(struct iscsi_segment *segment,
420 struct scatterlist *sg_list, unsigned int sg_count,
421 unsigned int offset, size_t size,
422 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
423{
424 struct scatterlist *sg;
425 unsigned int i;
426
427 debug_scsi("iscsi_segment_seek_sg offset %u size %zu\n",
428 offset, size);
429 __iscsi_segment_init(segment, size, done, hash);
430 for_each_sg(sg_list, sg, sg_count, i) {
431 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
432 sg->offset);
433 if (offset < sg->length) {
434 iscsi_tcp_segment_init_sg(segment, sg, offset);
435 return 0;
436 }
437 offset -= sg->length;
438 }
439
440 return ISCSI_ERR_DATA_OFFSET;
441}
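
/*
 * A userspace sketch (not driver code) of the seek loop above: walk a
 * list of chunk lengths until the chunk containing `offset' is found,
 * the way for_each_sg() walks the scatterlist; falling off the end
 * maps to ISCSI_ERR_DATA_OFFSET.
 */
#include <stdio.h>

static int seek_chunk(const unsigned int *len, unsigned int nchunks,
		      unsigned int offset, unsigned int *in_chunk)
{
	unsigned int i;

	for (i = 0; i < nchunks; i++) {
		if (offset < len[i]) {
			*in_chunk = offset;
			return i;
		}
		offset -= len[i];
	}
	return -1;	/* offset beyond the buffer */
}

int main(void)
{
	unsigned int len[] = { 4096, 4096, 2048 }, in;
	int i = seek_chunk(len, 3, 5000, &in);

	printf("offset 5000 -> chunk %d, offset %u within it\n", i, in);
	return 0;
}
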
442
443/**
444 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
445 * @tcp_conn: iscsi connection to prep for
446 *
447 * This function always passes NULL for the hash argument, because when this
448 * function is called we do not yet know the final size of the header and want
449 * to delay the digest processing until we know that.
450 */
451static void
452iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
453{
454 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
455 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
456 iscsi_segment_init_linear(&tcp_conn->in.segment,
457 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
458 iscsi_tcp_hdr_recv_done, NULL);
459}
460
461/*
462 * Handle incoming reply to any other type of command
463 */
464static int
465iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
466 struct iscsi_segment *segment)
467{
468 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
469 int rc = 0;
470
471 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
472 return ISCSI_ERR_DATA_DGST;
473
474 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
475 conn->data, tcp_conn->in.datalen);
476 if (rc)
477 return rc;
478
479 iscsi_tcp_hdr_recv_prep(tcp_conn);
480 return 0;
481}
482
483static void
484iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
485{
486 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
487 struct hash_desc *rx_hash = NULL;
488
489 if (conn->datadgst_en)
490 rx_hash = &tcp_conn->rx_hash;
491
492 iscsi_segment_init_linear(&tcp_conn->in.segment,
493 conn->data, tcp_conn->in.datalen,
494 iscsi_tcp_data_recv_done, rx_hash);
495}
496
497/*
498 * must be called with session lock
499 */
500static void
501iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
502{
503 struct iscsi_tcp_task *tcp_task = task->dd_data;
504 struct iscsi_r2t_info *r2t;
505
506 /* nothing to do for mgmt tasks */
507 if (!task->sc)
508 return;
509
510 /* flush task's r2t queues */
511 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
512 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
513 sizeof(void*));
514 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
515 }
516
517 r2t = tcp_task->r2t;
518 if (r2t != NULL) {
519 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
520 sizeof(void*));
521 tcp_task->r2t = NULL;
522 }
523}
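
/*
 * Both kfifos above carry struct iscsi_r2t_info pointers: r2tpool.queue
 * is the free list, r2tqueue holds R2Ts waiting to be serviced. A
 * minimal power-of-two pointer ring (illustrative only, not the kfifo
 * API) showing the same drain-and-recycle pattern:
 */
#include <stdio.h>

#define RING 8				/* power of two, as kfifo requires */

struct ring { void *slot[RING]; unsigned int in, out; };

static int ring_put(struct ring *r, void *p)
{
	if (r->in - r->out == RING)
		return 0;		/* full */
	r->slot[r->in++ & (RING - 1)] = p;
	return 1;
}

static void *ring_get(struct ring *r)
{
	return r->in == r->out ? NULL : r->slot[r->out++ & (RING - 1)];
}

int main(void)
{
	struct ring pool = { { 0 } }, pending = { { 0 } };
	int r2t[3], i;
	void *p;

	for (i = 0; i < 3; i++)			/* three R2Ts were queued */
		ring_put(&pending, &r2t[i]);
	while ((p = ring_get(&pending)))	/* cleanup recycles them */
		ring_put(&pool, p);
	printf("free pool now holds %u r2ts\n", pool.in - pool.out);
	return 0;
}
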
524
525/**
526 * iscsi_data_in - SCSI Data-In Response processing
527 * @conn: iscsi connection
528 * @task: scsi command task
529 **/
530static int
531iscsi_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
532{
533 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
534 struct iscsi_tcp_task *tcp_task = task->dd_data;
535 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
536 int datasn = be32_to_cpu(rhdr->datasn);
537 unsigned total_in_length = scsi_in(task->sc)->length;
538
539 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
540 if (tcp_conn->in.datalen == 0)
541 return 0;
542
543 if (tcp_task->exp_datasn != datasn) {
544 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
545 __func__, tcp_task->exp_datasn, datasn);
546 return ISCSI_ERR_DATASN;
547 }
548
549 tcp_task->exp_datasn++;
550
551 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
552 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
553 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_in_length(%d)\n",
554 __func__, tcp_task->data_offset,
555 tcp_conn->in.datalen, total_in_length);
556 return ISCSI_ERR_DATA_OFFSET;
557 }
558
559 conn->datain_pdus_cnt++;
560 return 0;
561}
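
/*
 * A compact sketch (illustrative only) of the two Data-In sanity
 * checks above: the DataSN must arrive strictly in sequence, and
 * offset plus length must stay inside the initiator's receive buffer.
 */
#include <stdio.h>

static int data_in_ok(unsigned int exp_datasn, unsigned int datasn,
		      unsigned int offset, unsigned int dlen,
		      unsigned int buflen)
{
	if (datasn != exp_datasn)
		return 0;		/* ISCSI_ERR_DATASN */
	if (offset + dlen > buflen)
		return 0;		/* ISCSI_ERR_DATA_OFFSET */
	return 1;
}

int main(void)
{
	printf("%d\n", data_in_ok(3, 3, 4096, 1024, 8192));	/* 1 */
	printf("%d\n", data_in_ok(3, 3, 8000, 1024, 8192));	/* 0 */
	return 0;
}
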
562
563/**
564 * iscsi_solicit_data_init - initialize first Data-Out
565 * @conn: iscsi connection
566 * @task: scsi command task
567 * @r2t: R2T info
568 *
569 * Notes:
570 * Initializes the first Data-Out within this R2T sequence and finds
571 * the proper data_offset within this SCSI command.
572 *
573 * This function is called with connection lock taken.
574 **/
575static void
576iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
577 struct iscsi_r2t_info *r2t)
578{
579 struct iscsi_data *hdr;
580
581 hdr = &r2t->dtask.hdr;
582 memset(hdr, 0, sizeof(struct iscsi_data));
583 hdr->ttt = r2t->ttt;
584 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
585 r2t->solicit_datasn++;
586 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
587 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
588 hdr->itt = task->hdr->itt;
589 hdr->exp_statsn = r2t->exp_statsn;
590 hdr->offset = cpu_to_be32(r2t->data_offset);
591 if (r2t->data_length > conn->max_xmit_dlength) {
592 hton24(hdr->dlength, conn->max_xmit_dlength);
593 r2t->data_count = conn->max_xmit_dlength;
594 hdr->flags = 0;
595 } else {
596 hton24(hdr->dlength, r2t->data_length);
597 r2t->data_count = r2t->data_length;
598 hdr->flags = ISCSI_FLAG_CMD_FINAL;
599 }
600 conn->dataout_pdus_cnt++;
601
602 r2t->sent = 0;
603}
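
/*
 * A userspace sketch (not driver code) of how one R2T is answered:
 * the solicited range is cut into Data-Out PDUs of at most
 * max_xmit_dlength bytes, DataSN incrementing per PDU, with the F
 * (final) flag only on the last one -- the split that the header
 * setup above and the continuation path implement. The sizes here
 * are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int left = 24576, max_xmit = 8192, offset = 0, datasn = 0;

	while (left) {
		unsigned int count = left < max_xmit ? left : max_xmit;

		printf("Data-Out datasn %u offset %u len %u%s\n",
		       datasn++, offset, count,
		       count == left ? " F" : "");
		offset += count;
		left -= count;
	}
	return 0;
}
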
604
605/**
606 * iscsi_r2t_rsp - iSCSI R2T Response processing
607 * @conn: iscsi connection
608 * @task: scsi command task
609 **/
610static int
611iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
612{
613 struct iscsi_r2t_info *r2t;
614 struct iscsi_session *session = conn->session;
615 struct iscsi_tcp_task *tcp_task = task->dd_data;
616 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
617 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
618 int r2tsn = be32_to_cpu(rhdr->r2tsn);
619 int rc;
620
621 if (tcp_conn->in.datalen) {
622 iscsi_conn_printk(KERN_ERR, conn,
623 "invalid R2t with datalen %d\n",
624 tcp_conn->in.datalen);
625 return ISCSI_ERR_DATALEN;
626 }
627
628 if (tcp_task->exp_datasn != r2tsn) {
629 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
630 __func__, tcp_task->exp_datasn, r2tsn);
631 return ISCSI_ERR_R2TSN;
632 }
633
634 /* fill in the new R2T associated with the task */
635 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
636
637 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
638 iscsi_conn_printk(KERN_INFO, conn,
639 "dropping R2T itt %d in recovery.\n",
640 task->itt);
641 return 0;
642 }
643
644 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
645 BUG_ON(!rc);
646
647 r2t->exp_statsn = rhdr->statsn;
648 r2t->data_length = be32_to_cpu(rhdr->data_length);
649 if (r2t->data_length == 0) {
650 iscsi_conn_printk(KERN_ERR, conn,
651 "invalid R2T with zero data len\n");
652 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
653 sizeof(void*));
654 return ISCSI_ERR_DATALEN;
655 }
656
657 if (r2t->data_length > session->max_burst)
658 debug_scsi("invalid R2T with data len %u and max burst %u. "
659 "Attempting to execute request.\n",
660 r2t->data_length, session->max_burst);
661
662 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
663 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
664 iscsi_conn_printk(KERN_ERR, conn,
665 "invalid R2T with data len %u at offset %u "
666 "and total length %d\n", r2t->data_length,
667 r2t->data_offset, scsi_out(task->sc)->length);
668 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
669 sizeof(void*));
670 return ISCSI_ERR_DATALEN;
671 }
672
673 r2t->ttt = rhdr->ttt; /* no flip */
674 r2t->solicit_datasn = 0;
675
676 iscsi_solicit_data_init(conn, task, r2t);
677
678 tcp_task->exp_datasn = r2tsn + 1;
679 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
680 conn->r2t_pdus_cnt++;
681
682 iscsi_requeue_task(task);
683 return 0;
684}
685
686/*
687 * Handle incoming reply to DataIn command
688 */
689static int
690iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
691 struct iscsi_segment *segment)
692{
693 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
694 struct iscsi_hdr *hdr = tcp_conn->in.hdr;
695 int rc;
696
697 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
698 return ISCSI_ERR_DATA_DGST;
699
700 /* check for non-exceptional status */
701 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
702 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
703 if (rc)
704 return rc;
705 }
706
707 iscsi_tcp_hdr_recv_prep(tcp_conn);
708 return 0;
709}
710
711/**
712 * iscsi_tcp_hdr_dissect - process PDU header
713 * @conn: iSCSI connection
714 * @hdr: PDU header
715 *
716 * This function analyzes the header of the PDU received,
717 * and performs several sanity checks. If the PDU is accompanied
718 * by data, the receive buffer is set up to copy the incoming data
719 * to the correct location.
720 */
721static int
722iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
723{
724 int rc = 0, opcode, ahslen;
725 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
726 struct iscsi_task *task;
727
728 /* verify PDU length */
729 tcp_conn->in.datalen = ntoh24(hdr->dlength);
730 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
731 iscsi_conn_printk(KERN_ERR, conn,
732 "iscsi_tcp: datalen %d > %d\n",
733 tcp_conn->in.datalen, conn->max_recv_dlength);
734 return ISCSI_ERR_DATALEN;
735 }
736
737 /* Additional header segments. So far, we don't
738 * process additional headers.
739 */
740 ahslen = hdr->hlength << 2;
741
742 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
743 /* verify itt (itt encoding: age+cid+itt) */
744 rc = iscsi_verify_itt(conn, hdr->itt);
745 if (rc)
746 return rc;
747
748 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
749 opcode, ahslen, tcp_conn->in.datalen);
750
751 switch (opcode) {
752 case ISCSI_OP_SCSI_DATA_IN:
753 spin_lock(&conn->session->lock);
754 task = iscsi_itt_to_ctask(conn, hdr->itt);
755 if (!task)
756 rc = ISCSI_ERR_BAD_ITT;
757 else
758 rc = iscsi_data_in(conn, task);
759 if (rc) {
760 spin_unlock(&conn->session->lock);
761 break;
762 }
763
764 if (tcp_conn->in.datalen) {
765 struct iscsi_tcp_task *tcp_task = task->dd_data;
766 struct hash_desc *rx_hash = NULL;
767 struct scsi_data_buffer *sdb = scsi_in(task->sc);
768
769 /*
770 * Set up the copy of Data-In into the Scsi_Cmnd
771 * scatterlist:
772 * We set up the iscsi_segment to point to the next
773 * scatterlist entry to copy to. As we go along,
774 * we move on to the next scatterlist entry and
775 * update the digest per-entry.
776 */
777 if (conn->datadgst_en)
778 rx_hash = &tcp_conn->rx_hash;
779
780 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
781 "datalen=%d)\n", tcp_conn,
782 tcp_task->data_offset,
783 tcp_conn->in.datalen);
784 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
785 sdb->table.sgl,
786 sdb->table.nents,
787 tcp_task->data_offset,
788 tcp_conn->in.datalen,
789 iscsi_tcp_process_data_in,
790 rx_hash);
791 spin_unlock(&conn->session->lock);
792 return rc;
793 }
794 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
795 spin_unlock(&conn->session->lock);
796 break;
797 case ISCSI_OP_SCSI_CMD_RSP:
798 if (tcp_conn->in.datalen) {
799 iscsi_tcp_data_recv_prep(tcp_conn);
800 return 0;
801 }
802 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
803 break;
804 case ISCSI_OP_R2T:
805 spin_lock(&conn->session->lock);
806 task = iscsi_itt_to_ctask(conn, hdr->itt);
807 if (!task)
808 rc = ISCSI_ERR_BAD_ITT;
809 else if (ahslen)
810 rc = ISCSI_ERR_AHSLEN;
811 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
812 rc = iscsi_r2t_rsp(conn, task);
813 else
814 rc = ISCSI_ERR_PROTO;
815 spin_unlock(&conn->session->lock);
816 break;
817 case ISCSI_OP_LOGIN_RSP:
818 case ISCSI_OP_TEXT_RSP:
819 case ISCSI_OP_REJECT:
820 case ISCSI_OP_ASYNC_EVENT:
821 /*
822 * It is possible that we could get a PDU with a buffer larger
823 * than 8K, but there are no targets that currently do this.
824 * For now we fail until we find a vendor that needs it
825 */
826 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
827 iscsi_conn_printk(KERN_ERR, conn,
828 "iscsi_tcp: received buffer of "
829 "len %u but conn buffer is only %u "
830 "(opcode %0x)\n",
831 tcp_conn->in.datalen,
832 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
833 rc = ISCSI_ERR_PROTO;
834 break;
835 }
836
837 /* If there's data coming in with the response,
838 * receive it to the connection's buffer.
839 */
840 if (tcp_conn->in.datalen) {
841 iscsi_tcp_data_recv_prep(tcp_conn);
842 return 0;
843 }
844 /* fall through */
845 case ISCSI_OP_LOGOUT_RSP:
846 case ISCSI_OP_NOOP_IN:
847 case ISCSI_OP_SCSI_TMFUNC_RSP:
848 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
849 break;
850 default:
851 rc = ISCSI_ERR_BAD_OPCODE;
852 break;
853 }
854
855 if (rc == 0) {
856 /* Anything that comes with data should have
857 * been handled above. */
858 if (tcp_conn->in.datalen)
859 return ISCSI_ERR_PROTO;
860 iscsi_tcp_hdr_recv_prep(tcp_conn);
861 }
862
863 return rc;
864}
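
/*
 * A userspace sketch (not driver code) of the BHS fields decoded
 * above, per RFC 3720: byte 4 of the 48-byte header is TotalAHSLength
 * in four-byte words (hence the "<< 2"), and bytes 5-7 are the 24-bit
 * DataSegmentLength that ntoh24() extracts.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t bhs[48] = { 0 };
	unsigned int ahslen, dlen;

	bhs[4] = 2;					/* 2 AHS words = 8 bytes */
	bhs[5] = 0x00; bhs[6] = 0x02; bhs[7] = 0x00;	/* 512-byte segment */

	ahslen = bhs[4] << 2;
	dlen = (bhs[5] << 16) | (bhs[6] << 8) | bhs[7];
	printf("ahslen %u datalen %u\n", ahslen, dlen);
	return 0;
}
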
865
866/** 66/**
867 * iscsi_tcp_hdr_recv_done - process PDU header 67 * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
868 *
869 * This is the callback invoked when the PDU header has
870 * been received. If the header is followed by additional
871 * header segments, we go back for more data.
872 */
873static int
874iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
875 struct iscsi_segment *segment)
876{
877 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
878 struct iscsi_hdr *hdr;
879
880 /* Check if there are additional header segments
881 * *prior* to computing the digest, because we
882 * may need to go back to the caller for more.
883 */
884 hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
885 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
886 /* Bump the header length - the caller will
887 * just loop around and get the AHS for us, and
888 * call again. */
889 unsigned int ahslen = hdr->hlength << 2;
890
891 /* Make sure we don't overflow */
892 if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
893 return ISCSI_ERR_AHSLEN;
894
895 segment->total_size += ahslen;
896 segment->size += ahslen;
897 return 0;
898 }
899
900 /* We're done processing the header. See if we're doing
901 * header digests; if so, set up the recv_digest buffer
902 * and go back for more. */
903 if (conn->hdrdgst_en) {
904 if (segment->digest_len == 0) {
905 iscsi_tcp_segment_splice_digest(segment,
906 segment->recv_digest);
907 return 0;
908 }
909 iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
910 segment->total_copied - ISCSI_DIGEST_SIZE,
911 segment->digest);
912
913 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
914 return ISCSI_ERR_HDR_DGST;
915 }
916
917 tcp_conn->in.hdr = hdr;
918 return iscsi_tcp_hdr_dissect(conn, hdr);
919}
920
921/**
922 * iscsi_tcp_recv - TCP receive in sendfile fashion
923 * @rd_desc: read descriptor 68 * @rd_desc: read descriptor
924 * @skb: socket buffer 69 * @skb: socket buffer
925 * @offset: offset in skb 70 * @offset: offset in skb
926 * @len: skb->len - offset 71 * @len: skb->len - offset
927 **/ 72 */
928static int 73static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
929iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 74 unsigned int offset, size_t len)
930 unsigned int offset, size_t len)
931{ 75{
932 struct iscsi_conn *conn = rd_desc->arg.data; 76 struct iscsi_conn *conn = rd_desc->arg.data;
933 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 77 unsigned int consumed, total_consumed = 0;
934 struct iscsi_segment *segment = &tcp_conn->in.segment; 78 int status;
935 struct skb_seq_state seq;
936 unsigned int consumed = 0;
937 int rc = 0;
938 79
939 debug_tcp("in %d bytes\n", skb->len - offset); 80 debug_tcp("in %d bytes\n", skb->len - offset);
940 81
941 if (unlikely(conn->suspend_rx)) { 82 do {
942 debug_tcp("conn %d Rx suspended!\n", conn->id); 83 status = 0;
943 return 0; 84 consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
944 } 85 offset += consumed;
86 total_consumed += consumed;
87 } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
945 88
946 skb_prepare_seq_read(skb, offset, skb->len, &seq); 89 debug_tcp("read %d bytes status %d\n", skb->len - offset, status);
947 while (1) { 90 return total_consumed;
948 unsigned int avail;
949 const u8 *ptr;
950
951 avail = skb_seq_read(consumed, &ptr, &seq);
952 if (avail == 0) {
953 debug_tcp("no more data avail. Consumed %d\n",
954 consumed);
955 break;
956 }
957 BUG_ON(segment->copied >= segment->size);
958
959 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
960 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
961 BUG_ON(rc == 0);
962 consumed += rc;
963
964 if (segment->total_copied >= segment->total_size) {
965 debug_tcp("segment done\n");
966 rc = segment->done(tcp_conn, segment);
967 if (rc != 0) {
968 skb_abort_seq_read(&seq);
969 goto error;
970 }
971
972 /* The done() function sets up the
973 * next segment. */
974 }
975 }
976 skb_abort_seq_read(&seq);
977 conn->rxdata_octets += consumed;
978 return consumed;
979
980error:
981 debug_tcp("Error receiving PDU, errno=%d\n", rc);
982 iscsi_conn_failure(conn, rc);
983 return 0;
984} 91}
985 92
986static void 93static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
987iscsi_tcp_data_ready(struct sock *sk, int flag)
988{ 94{
989 struct iscsi_conn *conn = sk->sk_user_data; 95 struct iscsi_conn *conn = sk->sk_user_data;
990 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 96 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -1000,7 +106,7 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
1000 */ 106 */
1001 rd_desc.arg.data = conn; 107 rd_desc.arg.data = conn;
1002 rd_desc.count = 1; 108 rd_desc.count = 1;
1003 tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv); 109 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
1004 110
1005 read_unlock(&sk->sk_callback_lock); 111 read_unlock(&sk->sk_callback_lock);
1006 112
@@ -1009,10 +115,10 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
1009 iscsi_tcp_segment_unmap(&tcp_conn->in.segment); 115 iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
1010} 116}
1011 117
1012static void 118static void iscsi_sw_tcp_state_change(struct sock *sk)
1013iscsi_tcp_state_change(struct sock *sk)
1014{ 119{
1015 struct iscsi_tcp_conn *tcp_conn; 120 struct iscsi_tcp_conn *tcp_conn;
121 struct iscsi_sw_tcp_conn *tcp_sw_conn;
1016 struct iscsi_conn *conn; 122 struct iscsi_conn *conn;
1017 struct iscsi_session *session; 123 struct iscsi_session *session;
1018 void (*old_state_change)(struct sock *); 124 void (*old_state_change)(struct sock *);
@@ -1030,7 +136,8 @@ iscsi_tcp_state_change(struct sock *sk)
1030 } 136 }
1031 137
1032 tcp_conn = conn->dd_data; 138 tcp_conn = conn->dd_data;
1033 old_state_change = tcp_conn->old_state_change; 139 tcp_sw_conn = tcp_conn->dd_data;
140 old_state_change = tcp_sw_conn->old_state_change;
1034 141
1035 read_unlock(&sk->sk_callback_lock); 142 read_unlock(&sk->sk_callback_lock);
1036 143
@@ -1041,63 +148,123 @@ iscsi_tcp_state_change(struct sock *sk)
1041 * iscsi_write_space - Called when more output buffer space is available 148 * iscsi_write_space - Called when more output buffer space is available
1042 * @sk: socket space is available for 149 * @sk: socket space is available for
1043 **/ 150 **/
1044static void 151static void iscsi_sw_tcp_write_space(struct sock *sk)
1045iscsi_write_space(struct sock *sk)
1046{ 152{
1047 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data; 153 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1048 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 154 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
155 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1049 156
1050 tcp_conn->old_write_space(sk); 157 tcp_sw_conn->old_write_space(sk);
1051 debug_tcp("iscsi_write_space: cid %d\n", conn->id); 158 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1052 scsi_queue_work(conn->session->host, &conn->xmitwork); 159 scsi_queue_work(conn->session->host, &conn->xmitwork);
1053} 160}
1054 161
1055static void 162static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
1056iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1057{ 163{
1058 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 164 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1059 struct sock *sk = tcp_conn->sock->sk; 165 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
166 struct sock *sk = tcp_sw_conn->sock->sk;
1060 167
1061 /* assign new callbacks */ 168 /* assign new callbacks */
1062 write_lock_bh(&sk->sk_callback_lock); 169 write_lock_bh(&sk->sk_callback_lock);
1063 sk->sk_user_data = conn; 170 sk->sk_user_data = conn;
1064 tcp_conn->old_data_ready = sk->sk_data_ready; 171 tcp_sw_conn->old_data_ready = sk->sk_data_ready;
1065 tcp_conn->old_state_change = sk->sk_state_change; 172 tcp_sw_conn->old_state_change = sk->sk_state_change;
1066 tcp_conn->old_write_space = sk->sk_write_space; 173 tcp_sw_conn->old_write_space = sk->sk_write_space;
1067 sk->sk_data_ready = iscsi_tcp_data_ready; 174 sk->sk_data_ready = iscsi_sw_tcp_data_ready;
1068 sk->sk_state_change = iscsi_tcp_state_change; 175 sk->sk_state_change = iscsi_sw_tcp_state_change;
1069 sk->sk_write_space = iscsi_write_space; 176 sk->sk_write_space = iscsi_sw_tcp_write_space;
1070 write_unlock_bh(&sk->sk_callback_lock); 177 write_unlock_bh(&sk->sk_callback_lock);
1071} 178}
1072 179
1073static void 180static void
1074iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) 181iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
1075{ 182{
1076 struct sock *sk = tcp_conn->sock->sk; 183 struct sock *sk = tcp_sw_conn->sock->sk;
1077 184
1078 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 185 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1079 write_lock_bh(&sk->sk_callback_lock); 186 write_lock_bh(&sk->sk_callback_lock);
1080 sk->sk_user_data = NULL; 187 sk->sk_user_data = NULL;
1081 sk->sk_data_ready = tcp_conn->old_data_ready; 188 sk->sk_data_ready = tcp_sw_conn->old_data_ready;
1082 sk->sk_state_change = tcp_conn->old_state_change; 189 sk->sk_state_change = tcp_sw_conn->old_state_change;
1083 sk->sk_write_space = tcp_conn->old_write_space; 190 sk->sk_write_space = tcp_sw_conn->old_write_space;
1084 sk->sk_no_check = 0; 191 sk->sk_no_check = 0;
1085 write_unlock_bh(&sk->sk_callback_lock); 192 write_unlock_bh(&sk->sk_callback_lock);
1086} 193}
1087 194
1088/** 195/**
1089 * iscsi_xmit - TCP transmit 196 * iscsi_sw_tcp_xmit_segment - transmit segment
197 * @tcp_conn: the iSCSI TCP connection
198 * @segment: the buffer to transmit
199 *
200 * This function transmits as much of the buffer as
201 * the network layer will accept, and returns the number of
202 * bytes transmitted.
203 *
204 * If CRC hashing is enabled, the function will compute the
205 * hash as it goes. When the entire segment has been transmitted,
206 * it will retrieve the hash value and send it as well.
207 */
208static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
209 struct iscsi_segment *segment)
210{
211 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
212 struct socket *sk = tcp_sw_conn->sock;
213 unsigned int copied = 0;
214 int r = 0;
215
216 while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
217 struct scatterlist *sg;
218 unsigned int offset, copy;
219 int flags = 0;
220
221 r = 0;
222 offset = segment->copied;
223 copy = segment->size - offset;
224
225 if (segment->total_copied + segment->size < segment->total_size)
226 flags |= MSG_MORE;
227
228 /* Use sendpage if we can; else fall back to sendmsg */
229 if (!segment->data) {
230 sg = segment->sg;
231 offset += segment->sg_offset + sg->offset;
232 r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
233 copy, flags);
234 } else {
235 struct msghdr msg = { .msg_flags = flags };
236 struct kvec iov = {
237 .iov_base = segment->data + offset,
238 .iov_len = copy
239 };
240
241 r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
242 }
243
244 if (r < 0) {
245 iscsi_tcp_segment_unmap(segment);
246 if (copied || r == -EAGAIN)
247 break;
248 return r;
249 }
250 copied += r;
251 }
252 return copied;
253}
254
255/**
256 * iscsi_sw_tcp_xmit - TCP transmit
1090 **/ 257 **/
1091static int 258static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
1092iscsi_xmit(struct iscsi_conn *conn)
1093{ 259{
1094 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 260 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1095 struct iscsi_segment *segment = &tcp_conn->out.segment; 261 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
262 struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
1096 unsigned int consumed = 0; 263 unsigned int consumed = 0;
1097 int rc = 0; 264 int rc = 0;
1098 265
1099 while (1) { 266 while (1) {
1100 rc = iscsi_tcp_xmit_segment(tcp_conn, segment); 267 rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
1101 if (rc < 0) { 268 if (rc < 0) {
1102 rc = ISCSI_ERR_XMIT_FAILED; 269 rc = ISCSI_ERR_XMIT_FAILED;
1103 goto error; 270 goto error;
@@ -1132,22 +299,22 @@ error:
1132/** 299/**
1133 * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit 300 * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
1134 */ 301 */
1135static inline int 302static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
1136iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
1137{ 303{
1138 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 304 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1139 struct iscsi_segment *segment = &tcp_conn->out.segment; 305 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
306 struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
1140 307
1141 return segment->total_copied - segment->total_size; 308 return segment->total_copied - segment->total_size;
1142} 309}
1143 310
1144static inline int 311static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
1145iscsi_tcp_flush(struct iscsi_conn *conn)
1146{ 312{
313 struct iscsi_conn *conn = task->conn;
1147 int rc; 314 int rc;
1148 315
1149 while (iscsi_tcp_xmit_qlen(conn)) { 316 while (iscsi_sw_tcp_xmit_qlen(conn)) {
1150 rc = iscsi_xmit(conn); 317 rc = iscsi_sw_tcp_xmit(conn);
1151 if (rc == 0) 318 if (rc == 0)
1152 return -EAGAIN; 319 return -EAGAIN;
1153 if (rc < 0) 320 if (rc < 0)
@@ -1161,27 +328,31 @@ iscsi_tcp_flush(struct iscsi_conn *conn)
1161 * This is called when we're done sending the header. 328 * This is called when we're done sending the header.
1162 * Simply copy the data_segment to the send segment, and return. 329 * Simply copy the data_segment to the send segment, and return.
1163 */ 330 */
1164static int 331static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
1165iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn, 332 struct iscsi_segment *segment)
1166 struct iscsi_segment *segment)
1167{ 333{
1168 tcp_conn->out.segment = tcp_conn->out.data_segment; 334 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
335
336 tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
1169 debug_tcp("Header done. Next segment size %u total_size %u\n", 337 debug_tcp("Header done. Next segment size %u total_size %u\n",
1170 tcp_conn->out.segment.size, tcp_conn->out.segment.total_size); 338 tcp_sw_conn->out.segment.size,
339 tcp_sw_conn->out.segment.total_size);
1171 return 0; 340 return 0;
1172} 341}
1173 342
1174static void 343static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
1175iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) 344 size_t hdrlen)
1176{ 345{
1177 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 346 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
347 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1178 348
1179 debug_tcp("%s(%p%s)\n", __func__, tcp_conn, 349 debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
1180 conn->hdrdgst_en ? ", digest enabled" : ""); 350 conn->hdrdgst_en ? ", digest enabled" : "");
1181 351
1182 /* Clear the data segment - needs to be filled in by the 352 /* Clear the data segment - needs to be filled in by the
1183 * caller using iscsi_tcp_send_data_prep() */ 353 * caller using iscsi_tcp_send_data_prep() */
1184 memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment)); 354 memset(&tcp_sw_conn->out.data_segment, 0,
355 sizeof(struct iscsi_segment));
1185 356
1186 /* If header digest is enabled, compute the CRC and 357 /* If header digest is enabled, compute the CRC and
1187 * place the digest into the same buffer. We make 358 * place the digest into the same buffer. We make
@@ -1189,7 +360,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1189 * sufficient room. 360 * sufficient room.
1190 */ 361 */
1191 if (conn->hdrdgst_en) { 362 if (conn->hdrdgst_en) {
1192 iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen, 363 iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
1193 hdr + hdrlen); 364 hdr + hdrlen);
1194 hdrlen += ISCSI_DIGEST_SIZE; 365 hdrlen += ISCSI_DIGEST_SIZE;
1195 } 366 }
@@ -1197,10 +368,10 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1197 /* Remember header pointer for later, when we need 368 /* Remember header pointer for later, when we need
1198 * to decide whether there's a payload to go along 369 * to decide whether there's a payload to go along
1199 * with the header. */ 370 * with the header. */
1200 tcp_conn->out.hdr = hdr; 371 tcp_sw_conn->out.hdr = hdr;
1201 372
1202 iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen, 373 iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
1203 iscsi_tcp_send_hdr_done, NULL); 374 iscsi_sw_tcp_send_hdr_done, NULL);
1204} 375}
1205 376
1206/* 377/*
@@ -1209,11 +380,12 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
1209 * of by the iscsi_segment routines. 380 * of by the iscsi_segment routines.
1210 */ 381 */
1211static int 382static int
1212iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, 383iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1213 unsigned int count, unsigned int offset, 384 unsigned int count, unsigned int offset,
1214 unsigned int len) 385 unsigned int len)
1215{ 386{
1216 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 387 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
388 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1217 struct hash_desc *tx_hash = NULL; 389 struct hash_desc *tx_hash = NULL;
1218 unsigned int hdr_spec_len; 390 unsigned int hdr_spec_len;
1219 391
@@ -1223,22 +395,23 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
1223 395
1224 /* Make sure the datalen matches what the caller 396 /* Make sure the datalen matches what the caller
1225 said he would send. */ 397 said he would send. */
1226 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); 398 hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
1227 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); 399 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1228 400
1229 if (conn->datadgst_en) 401 if (conn->datadgst_en)
1230 tx_hash = &tcp_conn->tx_hash; 402 tx_hash = &tcp_sw_conn->tx_hash;
1231 403
1232 return iscsi_segment_seek_sg(&tcp_conn->out.data_segment, 404 return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
1233 sg, count, offset, len, 405 sg, count, offset, len,
1234 NULL, tx_hash); 406 NULL, tx_hash);
1235} 407}
1236 408
1237static void 409static void
1238iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, 410iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
1239 size_t len) 411 size_t len)
1240{ 412{
1241 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 413 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
414 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1242 struct hash_desc *tx_hash = NULL; 415 struct hash_desc *tx_hash = NULL;
1243 unsigned int hdr_spec_len; 416 unsigned int hdr_spec_len;
1244 417
@@ -1247,341 +420,160 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
1247 420
1248 /* Make sure the datalen matches what the caller 421 /* Make sure the datalen matches what the caller
1249 said he would send. */ 422 said he would send. */
1250 hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength); 423 hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
1251 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len)); 424 WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
1252 425
1253 if (conn->datadgst_en) 426 if (conn->datadgst_en)
1254 tx_hash = &tcp_conn->tx_hash; 427 tx_hash = &tcp_sw_conn->tx_hash;
1255 428
1256 iscsi_segment_init_linear(&tcp_conn->out.data_segment, 429 iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
1257 data, len, NULL, tx_hash); 430 data, len, NULL, tx_hash);
1258} 431}
1259 432
1260/** 433static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
1261 * iscsi_solicit_data_cont - initialize next Data-Out 434 unsigned int offset, unsigned int count)
1262 * @conn: iscsi connection
1263 * @task: scsi command task
1264 * @r2t: R2T info
1265 * @left: bytes left to transfer
1266 *
1267 * Notes:
1268 * Initialize next Data-Out within this R2T sequence and continue
1269 * to process next Scatter-Gather element(if any) of this SCSI command.
1270 *
1271 * Called under connection lock.
1272 **/
1273static int
1274iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
1275 struct iscsi_r2t_info *r2t)
1276{ 435{
1277 struct iscsi_data *hdr;
1278 int new_offset, left;
1279
1280 BUG_ON(r2t->data_length - r2t->sent < 0);
1281 left = r2t->data_length - r2t->sent;
1282 if (left == 0)
1283 return 0;
1284
1285 hdr = &r2t->dtask.hdr;
1286 memset(hdr, 0, sizeof(struct iscsi_data));
1287 hdr->ttt = r2t->ttt;
1288 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1289 r2t->solicit_datasn++;
1290 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1291 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
1292 hdr->itt = task->hdr->itt;
1293 hdr->exp_statsn = r2t->exp_statsn;
1294 new_offset = r2t->data_offset + r2t->sent;
1295 hdr->offset = cpu_to_be32(new_offset);
1296 if (left > conn->max_xmit_dlength) {
1297 hton24(hdr->dlength, conn->max_xmit_dlength);
1298 r2t->data_count = conn->max_xmit_dlength;
1299 } else {
1300 hton24(hdr->dlength, left);
1301 r2t->data_count = left;
1302 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1303 }
1304
1305 conn->dataout_pdus_cnt++;
1306 return 1;
1307}
1308
1309/**
1310 * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1311 * @conn: iscsi connection
1312 * @task: scsi command task
1313 * @sc: scsi command
1314 **/
1315static int
1316iscsi_tcp_task_init(struct iscsi_task *task)
1317{
1318 struct iscsi_tcp_task *tcp_task = task->dd_data;
1319 struct iscsi_conn *conn = task->conn; 436 struct iscsi_conn *conn = task->conn;
1320 struct scsi_cmnd *sc = task->sc; 437 int err = 0;
1321 int err;
1322 438
1323 if (!sc) { 439 iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1324 /*
1325 * mgmt tasks do not have a scatterlist since they come
1326 * in from the iscsi interface.
1327 */
1328 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
1329 task->itt);
1330
1331 /* Prepare PDU, optionally w/ immediate data */
1332 iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
1333
1334 /* If we have immediate data, attach a payload */
1335 if (task->data_count)
1336 iscsi_tcp_send_linear_data_prepare(conn, task->data,
1337 task->data_count);
1338 return 0;
1339 }
1340 440
1341 BUG_ON(__kfifo_len(tcp_task->r2tqueue)); 441 if (!count)
1342 tcp_task->sent = 0; 442 return 0;
1343 tcp_task->exp_datasn = 0;
1344 443
1345 /* Prepare PDU, optionally w/ immediate data */ 444 if (!task->sc)
1346 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n", 445 iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
1347 conn->id, task->itt, task->imm_count, 446 else {
1348 task->unsol_count); 447 struct scsi_data_buffer *sdb = scsi_out(task->sc);
1349 iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
1350 448
1351 if (!task->imm_count) 449 err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
1352 return 0; 450 sdb->table.nents, offset,
451 count);
452 }
1353 453
1354 /* If we have immediate data, attach a payload */ 454 if (err) {
1355 err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, 455 iscsi_conn_failure(conn, err);
1356 scsi_out(sc)->table.nents, 456 return -EIO;
1357 0, task->imm_count); 457 }
1358 if (err)
1359 return err;
1360 tcp_task->sent += task->imm_count;
1361 task->imm_count = 0;
1362 return 0; 458 return 0;
1363} 459}
1364 460
1365/* 461static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
1366 * iscsi_tcp_task_xmit - xmit normal PDU task
1367 * @task: iscsi command task
1368 *
1369 * We're expected to return 0 when everything was transmitted successfully,
1370 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
1371 * of error.
1372 */
1373static int
1374iscsi_tcp_task_xmit(struct iscsi_task *task)
1375{ 462{
1376 struct iscsi_conn *conn = task->conn;
1377 struct iscsi_tcp_task *tcp_task = task->dd_data; 463 struct iscsi_tcp_task *tcp_task = task->dd_data;
1378 struct scsi_cmnd *sc = task->sc;
1379 struct scsi_data_buffer *sdb;
1380 int rc = 0;
1381
1382flush:
1383 /* Flush any pending data first. */
1384 rc = iscsi_tcp_flush(conn);
1385 if (rc < 0)
1386 return rc;
1387
1388 /* mgmt command */
1389 if (!sc) {
1390 if (task->hdr->itt == RESERVED_ITT)
1391 iscsi_put_task(task);
1392 return 0;
1393 }
1394
1395 /* Are we done already? */
1396 if (sc->sc_data_direction != DMA_TO_DEVICE)
1397 return 0;
1398 464
1399 sdb = scsi_out(sc); 465 task->hdr = task->dd_data + sizeof(*tcp_task);
1400 if (task->unsol_count != 0) { 466 task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
1401 struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
1402
1403 /* Prepare a header for the unsolicited PDU.
1404 * The amount of data we want to send will be
1405 * in task->data_count.
1406 * FIXME: return the data count instead.
1407 */
1408 iscsi_prep_unsolicit_data_pdu(task, hdr);
1409
1410 debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
1411 task->itt, tcp_task->sent, task->data_count);
1412
1413 iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
1414 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1415 sdb->table.nents, tcp_task->sent,
1416 task->data_count);
1417 if (rc)
1418 goto fail;
1419 tcp_task->sent += task->data_count;
1420 task->unsol_count -= task->data_count;
1421 goto flush;
1422 } else {
1423 struct iscsi_session *session = conn->session;
1424 struct iscsi_r2t_info *r2t;
1425
1426 /* All unsolicited PDUs sent. Check for solicited PDUs.
1427 */
1428 spin_lock_bh(&session->lock);
1429 r2t = tcp_task->r2t;
1430 if (r2t != NULL) {
1431 /* Continue with this R2T? */
1432 if (!iscsi_solicit_data_cont(conn, task, r2t)) {
1433 debug_scsi(" done with r2t %p\n", r2t);
1434
1435 __kfifo_put(tcp_task->r2tpool.queue,
1436 (void*)&r2t, sizeof(void*));
1437 tcp_task->r2t = r2t = NULL;
1438 }
1439 }
1440
1441 if (r2t == NULL) {
1442 __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
1443 sizeof(void*));
1444 r2t = tcp_task->r2t;
1445 }
1446 spin_unlock_bh(&session->lock);
1447
1448 /* Waiting for more R2Ts to arrive. */
1449 if (r2t == NULL) {
1450 debug_tcp("no R2Ts yet\n");
1451 return 0;
1452 }
1453
1454 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1455 r2t, r2t->solicit_datasn - 1, task->itt,
1456 r2t->data_offset + r2t->sent, r2t->data_count);
1457
1458 iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
1459 sizeof(struct iscsi_hdr));
1460
1461 rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
1462 sdb->table.nents,
1463 r2t->data_offset + r2t->sent,
1464 r2t->data_count);
1465 if (rc)
1466 goto fail;
1467 tcp_task->sent += r2t->data_count;
1468 r2t->sent += r2t->data_count;
1469 goto flush;
1470 }
1471 return 0; 467 return 0;
1472fail:
1473 iscsi_conn_failure(conn, rc);
1474 return -EIO;
1475} 468}
1476 469
1477static struct iscsi_cls_conn * 470static struct iscsi_cls_conn *
1478iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 471iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
472 uint32_t conn_idx)
1479{ 473{
1480 struct iscsi_conn *conn; 474 struct iscsi_conn *conn;
1481 struct iscsi_cls_conn *cls_conn; 475 struct iscsi_cls_conn *cls_conn;
1482 struct iscsi_tcp_conn *tcp_conn; 476 struct iscsi_tcp_conn *tcp_conn;
477 struct iscsi_sw_tcp_conn *tcp_sw_conn;
1483 478
1484 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx); 479 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
480 conn_idx);
1485 if (!cls_conn) 481 if (!cls_conn)
1486 return NULL; 482 return NULL;
1487 conn = cls_conn->dd_data; 483 conn = cls_conn->dd_data;
1488 /*
1489 * due to strange issues with iser these are not set
1490 * in iscsi_conn_setup
1491 */
1492 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1493
1494 tcp_conn = conn->dd_data; 484 tcp_conn = conn->dd_data;
1495 tcp_conn->iscsi_conn = conn; 485 tcp_sw_conn = tcp_conn->dd_data;
1496 486
1497 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 487 tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1498 CRYPTO_ALG_ASYNC); 488 CRYPTO_ALG_ASYNC);
1499 tcp_conn->tx_hash.flags = 0; 489 tcp_sw_conn->tx_hash.flags = 0;
1500 if (IS_ERR(tcp_conn->tx_hash.tfm)) 490 if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
1501 goto free_conn; 491 goto free_conn;
1502 492
1503 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 493 tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1504 CRYPTO_ALG_ASYNC); 494 CRYPTO_ALG_ASYNC);
1505 tcp_conn->rx_hash.flags = 0; 495 tcp_sw_conn->rx_hash.flags = 0;
1506 if (IS_ERR(tcp_conn->rx_hash.tfm)) 496 if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
1507 goto free_tx_tfm; 497 goto free_tx_tfm;
498 tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
1508 499
1509 return cls_conn; 500 return cls_conn;
1510 501
1511free_tx_tfm: 502free_tx_tfm:
1512 crypto_free_hash(tcp_conn->tx_hash.tfm); 503 crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
1513free_conn: 504free_conn:
1514 iscsi_conn_printk(KERN_ERR, conn, 505 iscsi_conn_printk(KERN_ERR, conn,
1515 "Could not create connection due to crc32c " 506 "Could not create connection due to crc32c "
1516 "loading error. Make sure the crc32c " 507 "loading error. Make sure the crc32c "
1517 "module is built as a module or into the " 508 "module is built as a module or into the "
1518 "kernel\n"); 509 "kernel\n");
1519 iscsi_conn_teardown(cls_conn); 510 iscsi_tcp_conn_teardown(cls_conn);
1520 return NULL; 511 return NULL;
1521} 512}
1522 513
1523static void 514static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
1524iscsi_tcp_release_conn(struct iscsi_conn *conn)
1525{ 515{
1526 struct iscsi_session *session = conn->session; 516 struct iscsi_session *session = conn->session;
1527 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 517 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1528 struct socket *sock = tcp_conn->sock; 518 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
519 struct socket *sock = tcp_sw_conn->sock;
1529 520
1530 if (!sock) 521 if (!sock)
1531 return; 522 return;
1532 523
1533 sock_hold(sock->sk); 524 sock_hold(sock->sk);
1534 iscsi_conn_restore_callbacks(tcp_conn); 525 iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
1535 sock_put(sock->sk); 526 sock_put(sock->sk);
1536 527
1537 spin_lock_bh(&session->lock); 528 spin_lock_bh(&session->lock);
1538 tcp_conn->sock = NULL; 529 tcp_sw_conn->sock = NULL;
1539 spin_unlock_bh(&session->lock); 530 spin_unlock_bh(&session->lock);
1540 sockfd_put(sock); 531 sockfd_put(sock);
1541} 532}
1542 533
1543static void 534static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1544iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1545{ 535{
1546 struct iscsi_conn *conn = cls_conn->dd_data; 536 struct iscsi_conn *conn = cls_conn->dd_data;
1547 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 537 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
538 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1548 539
1549 iscsi_tcp_release_conn(conn); 540 iscsi_sw_tcp_release_conn(conn);
1550 541
1551 if (tcp_conn->tx_hash.tfm) 542 if (tcp_sw_conn->tx_hash.tfm)
1552 crypto_free_hash(tcp_conn->tx_hash.tfm); 543 crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
1553 if (tcp_conn->rx_hash.tfm) 544 if (tcp_sw_conn->rx_hash.tfm)
1554 crypto_free_hash(tcp_conn->rx_hash.tfm); 545 crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
1555 546
1556 iscsi_conn_teardown(cls_conn); 547 iscsi_tcp_conn_teardown(cls_conn);
1557} 548}
1558 549
1559static void 550static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1560iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1561{ 551{
1562 struct iscsi_conn *conn = cls_conn->dd_data; 552 struct iscsi_conn *conn = cls_conn->dd_data;
1563 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 553 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
554 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1564 555
1565 /* userspace may have goofed up and not bound us */ 556 /* userspace may have goofed up and not bound us */
1566 if (!tcp_conn->sock) 557 if (!tcp_sw_conn->sock)
1567 return; 558 return;
1568 /* 559 /*
1569 * Make sure our recv side is stopped. 560 * Make sure our recv side is stopped.
1570 * Older tools called conn stop before ep_disconnect 561 * Older tools called conn stop before ep_disconnect
1571 * so IO could still be coming in. 562 * so IO could still be coming in.
1572 */ 563 */
1573 write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock); 564 write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
1574 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 565 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1575 write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock); 566 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
1576 567
1577 iscsi_conn_stop(cls_conn, flag); 568 iscsi_conn_stop(cls_conn, flag);
1578 iscsi_tcp_release_conn(conn); 569 iscsi_sw_tcp_release_conn(conn);
1579} 570}
1580 571
1581static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, 572static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
1582 char *buf, int *port, 573 char *buf, int *port,
1583 int (*getname)(struct socket *, struct sockaddr *, 574 int (*getname)(struct socket *,
1584 int *addrlen)) 575 struct sockaddr *,
576 int *addrlen))
1585{ 577{
1586 struct sockaddr_storage *addr; 578 struct sockaddr_storage *addr;
1587 struct sockaddr_in6 *sin6; 579 struct sockaddr_in6 *sin6;
@@ -1619,14 +611,15 @@ free_addr:
1619} 611}
1620 612
1621static int 613static int
1622iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 614iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1623 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 615 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
1624 int is_leading) 616 int is_leading)
1625{ 617{
1626 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 618 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1627 struct iscsi_host *ihost = shost_priv(shost); 619 struct iscsi_host *ihost = shost_priv(shost);
1628 struct iscsi_conn *conn = cls_conn->dd_data; 620 struct iscsi_conn *conn = cls_conn->dd_data;
1629 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 621 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
622 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1630 struct sock *sk; 623 struct sock *sk;
1631 struct socket *sock; 624 struct socket *sock;
1632 int err; 625 int err;
@@ -1643,13 +636,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1643 * userspace may still want to query the values since we will 636 * userspace may still want to query the values since we will
1644 * be using them for the reconnect 637 * be using them for the reconnect
1645 */ 638 */
1646 err = iscsi_tcp_get_addr(conn, sock, conn->portal_address, 639 err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
1647 &conn->portal_port, kernel_getpeername); 640 &conn->portal_port, kernel_getpeername);
1648 if (err) 641 if (err)
1649 goto free_socket; 642 goto free_socket;
1650 643
1651 err = iscsi_tcp_get_addr(conn, sock, ihost->local_address, 644 err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
1652 &ihost->local_port, kernel_getsockname); 645 &ihost->local_port, kernel_getsockname);
1653 if (err) 646 if (err)
1654 goto free_socket; 647 goto free_socket;
1655 648
@@ -1658,7 +651,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1658 goto free_socket; 651 goto free_socket;
1659 652
1660 /* bind iSCSI connection and socket */ 653 /* bind iSCSI connection and socket */
1661 tcp_conn->sock = sock; 654 tcp_sw_conn->sock = sock;
1662 655
1663 /* setup Socket parameters */ 656 /* setup Socket parameters */
1664 sk = sock->sk; 657 sk = sock->sk;
@@ -1666,8 +659,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 659 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
1667 sk->sk_allocation = GFP_ATOMIC; 660 sk->sk_allocation = GFP_ATOMIC;
1668 661
1669 iscsi_conn_set_callbacks(conn); 662 iscsi_sw_tcp_conn_set_callbacks(conn);
1670 tcp_conn->sendpage = tcp_conn->sock->ops->sendpage; 663 tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
1671 /* 664 /*
1672 * set receive state machine into initial state 665 * set receive state machine into initial state
1673 */ 666 */
@@ -1679,74 +672,14 @@ free_socket:
1679 return err; 672 return err;
1680} 673}
1681 674
1682static int 675static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
1683iscsi_r2tpool_alloc(struct iscsi_session *session) 676 enum iscsi_param param, char *buf,
1684{ 677 int buflen)
1685 int i;
1686 int cmd_i;
1687
1688 /*
1689 * initialize per-task: R2T pool and xmit queue
1690 */
1691 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1692 struct iscsi_task *task = session->cmds[cmd_i];
1693 struct iscsi_tcp_task *tcp_task = task->dd_data;
1694
1695 /*
1696 * pre-allocated x4 as much r2ts to handle race when
1697 * target acks DataOut faster than we data_xmit() queues
1698 * could replenish r2tqueue.
1699 */
1700
1701 /* R2T pool */
1702 if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
1703 sizeof(struct iscsi_r2t_info))) {
1704 goto r2t_alloc_fail;
1705 }
1706
1707 /* R2T xmit queue */
1708 tcp_task->r2tqueue = kfifo_alloc(
1709 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1710 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1711 iscsi_pool_free(&tcp_task->r2tpool);
1712 goto r2t_alloc_fail;
1713 }
1714 }
1715
1716 return 0;
1717
1718r2t_alloc_fail:
1719 for (i = 0; i < cmd_i; i++) {
1720 struct iscsi_task *task = session->cmds[i];
1721 struct iscsi_tcp_task *tcp_task = task->dd_data;
1722
1723 kfifo_free(tcp_task->r2tqueue);
1724 iscsi_pool_free(&tcp_task->r2tpool);
1725 }
1726 return -ENOMEM;
1727}
1728
1729static void
1730iscsi_r2tpool_free(struct iscsi_session *session)
1731{
1732 int i;
1733
1734 for (i = 0; i < session->cmds_max; i++) {
1735 struct iscsi_task *task = session->cmds[i];
1736 struct iscsi_tcp_task *tcp_task = task->dd_data;
1737
1738 kfifo_free(tcp_task->r2tqueue);
1739 iscsi_pool_free(&tcp_task->r2tpool);
1740 }
1741}
1742
1743static int
1744iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1745 char *buf, int buflen)
1746{ 678{
1747 struct iscsi_conn *conn = cls_conn->dd_data; 679 struct iscsi_conn *conn = cls_conn->dd_data;
1748 struct iscsi_session *session = conn->session; 680 struct iscsi_session *session = conn->session;
1749 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 681 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
682 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1750 int value; 683 int value;
1751 684
1752 switch(param) { 685 switch(param) {
@@ -1755,8 +688,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1755 break; 688 break;
1756 case ISCSI_PARAM_DATADGST_EN: 689 case ISCSI_PARAM_DATADGST_EN:
1757 iscsi_set_param(cls_conn, param, buf, buflen); 690 iscsi_set_param(cls_conn, param, buf, buflen);
1758 tcp_conn->sendpage = conn->datadgst_en ? 691 tcp_sw_conn->sendpage = conn->datadgst_en ?
1759 sock_no_sendpage : tcp_conn->sock->ops->sendpage; 692 sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
1760 break; 693 break;
1761 case ISCSI_PARAM_MAX_R2T: 694 case ISCSI_PARAM_MAX_R2T:
1762 sscanf(buf, "%d", &value); 695 sscanf(buf, "%d", &value);
@@ -1764,9 +697,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1764 return -EINVAL; 697 return -EINVAL;
1765 if (session->max_r2t == value) 698 if (session->max_r2t == value)
1766 break; 699 break;
1767 iscsi_r2tpool_free(session); 700 iscsi_tcp_r2tpool_free(session);
1768 iscsi_set_param(cls_conn, param, buf, buflen); 701 iscsi_set_param(cls_conn, param, buf, buflen);
1769 if (iscsi_r2tpool_alloc(session)) 702 if (iscsi_tcp_r2tpool_alloc(session))
1770 return -ENOMEM; 703 return -ENOMEM;
1771 break; 704 break;
1772 default: 705 default:
@@ -1776,9 +709,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
1776 return 0; 709 return 0;
1777} 710}
1778 711
1779static int 712static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1780iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, 713 enum iscsi_param param, char *buf)
1781 enum iscsi_param param, char *buf)
1782{ 714{
1783 struct iscsi_conn *conn = cls_conn->dd_data; 715 struct iscsi_conn *conn = cls_conn->dd_data;
1784 int len; 716 int len;
@@ -1802,48 +734,42 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
1802} 734}
1803 735
1804static void 736static void
1805iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 737iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
738 struct iscsi_stats *stats)
1806{ 739{
1807 struct iscsi_conn *conn = cls_conn->dd_data; 740 struct iscsi_conn *conn = cls_conn->dd_data;
1808 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 741 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
742 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
1809 743
1810 stats->txdata_octets = conn->txdata_octets;
1811 stats->rxdata_octets = conn->rxdata_octets;
1812 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1813 stats->dataout_pdus = conn->dataout_pdus_cnt;
1814 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1815 stats->datain_pdus = conn->datain_pdus_cnt;
1816 stats->r2t_pdus = conn->r2t_pdus_cnt;
1817 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1818 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1819 stats->custom_length = 3; 744 stats->custom_length = 3;
1820 strcpy(stats->custom[0].desc, "tx_sendpage_failures"); 745 strcpy(stats->custom[0].desc, "tx_sendpage_failures");
1821 stats->custom[0].value = tcp_conn->sendpage_failures_cnt; 746 stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
1822 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); 747 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
1823 stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt; 748 stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
1824 strcpy(stats->custom[2].desc, "eh_abort_cnt"); 749 strcpy(stats->custom[2].desc, "eh_abort_cnt");
1825 stats->custom[2].value = conn->eh_abort_cnt; 750 stats->custom[2].value = conn->eh_abort_cnt;
751
752 iscsi_tcp_conn_get_stats(cls_conn, stats);
1826} 753}
1827 754
1828static struct iscsi_cls_session * 755static struct iscsi_cls_session *
1829iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, 756iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1830 uint16_t qdepth, uint32_t initial_cmdsn, 757 uint16_t qdepth, uint32_t initial_cmdsn,
1831 uint32_t *hostno) 758 uint32_t *hostno)
1832{ 759{
1833 struct iscsi_cls_session *cls_session; 760 struct iscsi_cls_session *cls_session;
1834 struct iscsi_session *session; 761 struct iscsi_session *session;
1835 struct Scsi_Host *shost; 762 struct Scsi_Host *shost;
1836 int cmd_i;
1837 763
1838 if (ep) { 764 if (ep) {
1839 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); 765 printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
1840 return NULL; 766 return NULL;
1841 } 767 }
1842 768
1843 shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth); 769 shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, qdepth);
1844 if (!shost) 770 if (!shost)
1845 return NULL; 771 return NULL;
1846 shost->transportt = iscsi_tcp_scsi_transport; 772 shost->transportt = iscsi_sw_tcp_scsi_transport;
1847 shost->max_lun = iscsi_max_lun; 773 shost->max_lun = iscsi_max_lun;
1848 shost->max_id = 0; 774 shost->max_id = 0;
1849 shost->max_channel = 0; 775 shost->max_channel = 0;
@@ -1853,23 +779,17 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
1853 goto free_host; 779 goto free_host;
1854 *hostno = shost->host_no; 780 *hostno = shost->host_no;
1855 781
1856 cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max, 782 cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
1857 sizeof(struct iscsi_tcp_task), 783 cmds_max,
784 sizeof(struct iscsi_tcp_task) +
785 sizeof(struct iscsi_sw_tcp_hdrbuf),
1858 initial_cmdsn, 0); 786 initial_cmdsn, 0);
1859 if (!cls_session) 787 if (!cls_session)
1860 goto remove_host; 788 goto remove_host;
1861 session = cls_session->dd_data; 789 session = cls_session->dd_data;
1862 790
1863 shost->can_queue = session->scsi_cmds_max; 791 shost->can_queue = session->scsi_cmds_max;
1864 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 792 if (iscsi_tcp_r2tpool_alloc(session))
1865 struct iscsi_task *task = session->cmds[cmd_i];
1866 struct iscsi_tcp_task *tcp_task = task->dd_data;
1867
1868 task->hdr = &tcp_task->hdr.cmd_hdr;
1869 task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
1870 }
1871
1872 if (iscsi_r2tpool_alloc(session))
1873 goto remove_session; 793 goto remove_session;
1874 return cls_session; 794 return cls_session;
1875 795
@@ -1882,25 +802,25 @@ free_host:
1882 return NULL; 802 return NULL;
1883} 803}
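Note how the new code sizes per-task driver data as sizeof(struct iscsi_tcp_task) + sizeof(struct iscsi_sw_tcp_hdrbuf): the library's task structure and the sw-tcp private header buffer share one allocation, and each layer finds its slice directly behind the previous one. A userspace sketch of that stacked dd_data convention, under hypothetical names rather than the libiscsi API:

#include <stdio.h>
#include <stdlib.h>

struct task { int itt; void *dd_data; };	/* generic layer */
struct tcp_task { int exp_datasn; };		/* libiscsi_tcp layer */
struct sw_tcp_hdrbuf { char hdr[48 + 4]; };	/* sw-tcp layer: BHS + digest (sizes illustrative) */

/* one allocation: task, then tcp_task, then the sw-tcp header buffer */
static struct task *task_alloc(size_t dd_size)
{
	struct task *t = calloc(1, sizeof(*t) + dd_size);
	if (t)
		t->dd_data = t + 1;		/* private area starts after task */
	return t;
}

int main(void)
{
	struct task *t = task_alloc(sizeof(struct tcp_task) +
				    sizeof(struct sw_tcp_hdrbuf));
	if (!t)
		return 1;
	struct tcp_task *tt = t->dd_data;
	struct sw_tcp_hdrbuf *hb = (struct sw_tcp_hdrbuf *)(tt + 1);
	printf("task %p tcp_task %p hdrbuf %p\n",
	       (void *)t, (void *)tt, (void *)hb);
	free(t);
	return 0;
}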
1884 804
1885static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) 805static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
1886{ 806{
1887 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 807 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1888 808
1889 iscsi_r2tpool_free(cls_session->dd_data); 809 iscsi_tcp_r2tpool_free(cls_session->dd_data);
1890 iscsi_session_teardown(cls_session); 810 iscsi_session_teardown(cls_session);
1891 811
1892 iscsi_host_remove(shost); 812 iscsi_host_remove(shost);
1893 iscsi_host_free(shost); 813 iscsi_host_free(shost);
1894} 814}
1895 815
1896static int iscsi_tcp_slave_configure(struct scsi_device *sdev) 816static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
1897{ 817{
1898 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); 818 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
1899 blk_queue_dma_alignment(sdev->request_queue, 0); 819 blk_queue_dma_alignment(sdev->request_queue, 0);
1900 return 0; 820 return 0;
1901} 821}
1902 822
1903static struct scsi_host_template iscsi_sht = { 823static struct scsi_host_template iscsi_sw_tcp_sht = {
1904 .module = THIS_MODULE, 824 .module = THIS_MODULE,
1905 .name = "iSCSI Initiator over TCP/IP", 825 .name = "iSCSI Initiator over TCP/IP",
1906 .queuecommand = iscsi_queuecommand, 826 .queuecommand = iscsi_queuecommand,
@@ -1913,12 +833,12 @@ static struct scsi_host_template iscsi_sht = {
1913 .eh_device_reset_handler= iscsi_eh_device_reset, 833 .eh_device_reset_handler= iscsi_eh_device_reset,
1914 .eh_target_reset_handler= iscsi_eh_target_reset, 834 .eh_target_reset_handler= iscsi_eh_target_reset,
1915 .use_clustering = DISABLE_CLUSTERING, 835 .use_clustering = DISABLE_CLUSTERING,
1916 .slave_configure = iscsi_tcp_slave_configure, 836 .slave_configure = iscsi_sw_tcp_slave_configure,
1917 .proc_name = "iscsi_tcp", 837 .proc_name = "iscsi_tcp",
1918 .this_id = -1, 838 .this_id = -1,
1919}; 839};
1920 840
1921static struct iscsi_transport iscsi_tcp_transport = { 841static struct iscsi_transport iscsi_sw_tcp_transport = {
1922 .owner = THIS_MODULE, 842 .owner = THIS_MODULE,
1923 .name = "tcp", 843 .name = "tcp",
1924 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST 844 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
@@ -1951,32 +871,36 @@ static struct iscsi_transport iscsi_tcp_transport = {
1951 ISCSI_HOST_INITIATOR_NAME | 871 ISCSI_HOST_INITIATOR_NAME |
1952 ISCSI_HOST_NETDEV_NAME, 872 ISCSI_HOST_NETDEV_NAME,
1953 /* session management */ 873 /* session management */
1954 .create_session = iscsi_tcp_session_create, 874 .create_session = iscsi_sw_tcp_session_create,
1955 .destroy_session = iscsi_tcp_session_destroy, 875 .destroy_session = iscsi_sw_tcp_session_destroy,
1956 /* connection management */ 876 /* connection management */
1957 .create_conn = iscsi_tcp_conn_create, 877 .create_conn = iscsi_sw_tcp_conn_create,
1958 .bind_conn = iscsi_tcp_conn_bind, 878 .bind_conn = iscsi_sw_tcp_conn_bind,
1959 .destroy_conn = iscsi_tcp_conn_destroy, 879 .destroy_conn = iscsi_sw_tcp_conn_destroy,
1960 .set_param = iscsi_conn_set_param, 880 .set_param = iscsi_sw_tcp_conn_set_param,
1961 .get_conn_param = iscsi_tcp_conn_get_param, 881 .get_conn_param = iscsi_sw_tcp_conn_get_param,
1962 .get_session_param = iscsi_session_get_param, 882 .get_session_param = iscsi_session_get_param,
1963 .start_conn = iscsi_conn_start, 883 .start_conn = iscsi_conn_start,
1964 .stop_conn = iscsi_tcp_conn_stop, 884 .stop_conn = iscsi_sw_tcp_conn_stop,
1965 /* iscsi host params */ 885 /* iscsi host params */
1966 .get_host_param = iscsi_host_get_param, 886 .get_host_param = iscsi_host_get_param,
1967 .set_host_param = iscsi_host_set_param, 887 .set_host_param = iscsi_host_set_param,
1968 /* IO */ 888 /* IO */
1969 .send_pdu = iscsi_conn_send_pdu, 889 .send_pdu = iscsi_conn_send_pdu,
1970 .get_stats = iscsi_conn_get_stats, 890 .get_stats = iscsi_sw_tcp_conn_get_stats,
891 /* iscsi task/cmd helpers */
1971 .init_task = iscsi_tcp_task_init, 892 .init_task = iscsi_tcp_task_init,
1972 .xmit_task = iscsi_tcp_task_xmit, 893 .xmit_task = iscsi_tcp_task_xmit,
1973 .cleanup_task = iscsi_tcp_cleanup_task, 894 .cleanup_task = iscsi_tcp_cleanup_task,
895 /* low level pdu helpers */
896 .xmit_pdu = iscsi_sw_tcp_pdu_xmit,
897 .init_pdu = iscsi_sw_tcp_pdu_init,
898 .alloc_pdu = iscsi_sw_tcp_pdu_alloc,
1974 /* recovery */ 899 /* recovery */
1975 .session_recovery_timedout = iscsi_session_recovery_timedout, 900 .session_recovery_timedout = iscsi_session_recovery_timedout,
1976}; 901};
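The iscsi_transport structure above is a classic C ops table: the transport core calls through function pointers, and the LLD fills in only the hooks it implements. A minimal sketch of the pattern, with illustrative names:

#include <stdio.h>

struct transport_ops {
	const char *name;
	int  (*create_conn)(int cid);
	void (*destroy_conn)(int cid);
};

static int sw_tcp_create_conn(int cid)
{
	printf("create conn %d\n", cid);
	return 0;
}

static void sw_tcp_destroy_conn(int cid)
{
	printf("destroy conn %d\n", cid);
}

static const struct transport_ops sw_tcp_ops = {
	.name		= "tcp",
	.create_conn	= sw_tcp_create_conn,
	.destroy_conn	= sw_tcp_destroy_conn,
};

/* core code sees only the ops table, never the LLD's functions */
static void core_use(const struct transport_ops *ops)
{
	if (ops->create_conn && ops->create_conn(0) == 0 && ops->destroy_conn)
		ops->destroy_conn(0);
}

int main(void)
{
	core_use(&sw_tcp_ops);
	return 0;
}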
1977 902
1978static int __init 903static int __init iscsi_sw_tcp_init(void)
1979iscsi_tcp_init(void)
1980{ 904{
1981 if (iscsi_max_lun < 1) { 905 if (iscsi_max_lun < 1) {
1982 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n", 906 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
@@ -1984,19 +908,18 @@ iscsi_tcp_init(void)
1984 return -EINVAL; 908 return -EINVAL;
1985 } 909 }
1986 910
1987 iscsi_tcp_scsi_transport = iscsi_register_transport( 911 iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
1988 &iscsi_tcp_transport); 912 &iscsi_sw_tcp_transport);
1989 if (!iscsi_tcp_scsi_transport) 913 if (!iscsi_sw_tcp_scsi_transport)
1990 return -ENODEV; 914 return -ENODEV;
1991 915
1992 return 0; 916 return 0;
1993} 917}
1994 918
1995static void __exit 919static void __exit iscsi_sw_tcp_exit(void)
1996iscsi_tcp_exit(void)
1997{ 920{
1998 iscsi_unregister_transport(&iscsi_tcp_transport); 921 iscsi_unregister_transport(&iscsi_sw_tcp_transport);
1999} 922}
2000 923
2001module_init(iscsi_tcp_init); 924module_init(iscsi_sw_tcp_init);
2002module_exit(iscsi_tcp_exit); 925module_exit(iscsi_sw_tcp_exit);
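The module entry points follow the standard register-in-init, unregister-in-exit shape: fail init with a negative errno if registration fails, and tear down in exit exactly what init set up. A minimal module skeleton of the same shape (the "demo" names are illustrative):

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
	pr_info("demo: loaded\n");
	/* a real transport would register itself here and return
	 * -ENODEV on failure, as iscsi_sw_tcp_init does above */
	return 0;
}

static void __exit demo_exit(void)
{
	/* undo exactly what demo_init did, in reverse order */
	pr_info("demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");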
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 498d8ca39848..ca6b7bc64de0 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -19,67 +19,27 @@
19 * See the file COPYING included with this distribution for more details. 19 * See the file COPYING included with this distribution for more details.
20 */ 20 */
21 21
22#ifndef ISCSI_TCP_H 22#ifndef ISCSI_SW_TCP_H
23#define ISCSI_TCP_H 23#define ISCSI_SW_TCP_H
24 24
25#include <scsi/libiscsi.h> 25#include <scsi/libiscsi.h>
26#include <scsi/libiscsi_tcp.h>
26 27
27struct crypto_hash;
28struct socket; 28struct socket;
29struct iscsi_tcp_conn; 29struct iscsi_tcp_conn;
30struct iscsi_segment;
31
32typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
33 struct iscsi_segment *);
34
35struct iscsi_segment {
36 unsigned char *data;
37 unsigned int size;
38 unsigned int copied;
39 unsigned int total_size;
40 unsigned int total_copied;
41
42 struct hash_desc *hash;
43 unsigned char recv_digest[ISCSI_DIGEST_SIZE];
44 unsigned char digest[ISCSI_DIGEST_SIZE];
45 unsigned int digest_len;
46
47 struct scatterlist *sg;
48 void *sg_mapped;
49 unsigned int sg_offset;
50
51 iscsi_segment_done_fn_t *done;
52};
53
 54/* Socket connection receive helper */
55struct iscsi_tcp_recv {
56 struct iscsi_hdr *hdr;
57 struct iscsi_segment segment;
58
59 /* Allocate buffer for BHS + AHS */
60 uint32_t hdr_buf[64];
61
62 /* copied and flipped values */
63 int datalen;
64};
65 30
66/* Socket connection send helper */ 31/* Socket connection send helper */
67struct iscsi_tcp_send { 32struct iscsi_sw_tcp_send {
68 struct iscsi_hdr *hdr; 33 struct iscsi_hdr *hdr;
69 struct iscsi_segment segment; 34 struct iscsi_segment segment;
70 struct iscsi_segment data_segment; 35 struct iscsi_segment data_segment;
71}; 36};
72 37
73struct iscsi_tcp_conn { 38struct iscsi_sw_tcp_conn {
74 struct iscsi_conn *iscsi_conn; 39 struct iscsi_conn *iscsi_conn;
75 struct socket *sock; 40 struct socket *sock;
76 int stop_stage; /* conn_stop() flag: *
77 * stop to recover, *
78 * stop to terminate */
79 /* control data */
80 struct iscsi_tcp_recv in; /* TCP receive context */
81 struct iscsi_tcp_send out; /* TCP send context */
82 41
42 struct iscsi_sw_tcp_send out;
83 /* old values for socket callbacks */ 43 /* old values for socket callbacks */
84 void (*old_data_ready)(struct sock *, int); 44 void (*old_data_ready)(struct sock *, int);
85 void (*old_state_change)(struct sock *); 45 void (*old_state_change)(struct sock *);
@@ -93,41 +53,13 @@ struct iscsi_tcp_conn {
93 uint32_t sendpage_failures_cnt; 53 uint32_t sendpage_failures_cnt;
94 uint32_t discontiguous_hdr_cnt; 54 uint32_t discontiguous_hdr_cnt;
95 55
96 int error;
97
98 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); 56 ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
99}; 57};
100 58
101struct iscsi_data_task { 59struct iscsi_sw_tcp_hdrbuf {
102 struct iscsi_data hdr; /* PDU */ 60 struct iscsi_hdr hdrbuf;
103 char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ 61 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
104};
105
106struct iscsi_r2t_info {
107 __be32 ttt; /* copied from R2T */
108 __be32 exp_statsn; /* copied from R2T */
109 uint32_t data_length; /* copied from R2T */
110 uint32_t data_offset; /* copied from R2T */
111 int sent; /* R2T sequence progress */
112 int data_count; /* DATA-Out payload progress */
113 int solicit_datasn;
114 struct iscsi_data_task dtask; /* Data-Out header buf */
115};
116
117struct iscsi_tcp_task {
118 struct iscsi_hdr_buff {
119 struct iscsi_cmd cmd_hdr;
120 char hdrextbuf[ISCSI_MAX_AHS_SIZE +
121 ISCSI_DIGEST_SIZE]; 62 ISCSI_DIGEST_SIZE];
122 } hdr;
123
124 int sent;
125 uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
126 int data_offset;
127 struct iscsi_r2t_info *r2t; /* in progress R2T */
128 struct iscsi_pool r2tpool;
129 struct kfifo *r2tqueue;
130 struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
131}; 63};
132 64
133#endif /* ISCSI_H */ 65#endif /* ISCSI_SW_TCP_H */
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000000..55f982de3a9a
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
1# $Id: Makefile
2
3obj-$(CONFIG_LIBFC) += libfc.o
4
5libfc-objs := \
6 fc_disc.o \
7 fc_exch.o \
8 fc_elsct.o \
9 fc_frame.o \
10 fc_lport.o \
11 fc_rport.o \
12 fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000000..dd1564c9e04a
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Target Discovery
22 *
23 * This block discovers all FC-4 remote ports, including FCP initiators. It
24 * also handles RSCN events and re-discovery if necessary.
25 */
26
27/*
28 * DISC LOCKING
29 *
 30 * The disc mutex can be locked when acquiring rport locks, but may not
31 * be held when acquiring the lport lock. Refer to fc_lport.c for more
32 * details.
33 */
34
35#include <linux/timer.h>
36#include <linux/err.h>
37#include <asm/unaligned.h>
38
39#include <scsi/fc/fc_gs.h>
40
41#include <scsi/libfc.h>
42
43#define FC_DISC_RETRY_LIMIT 3 /* max retries */
44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45
46#define FC_DISC_DELAY 3
47
48static int fc_disc_debug;
49
50#define FC_DEBUG_DISC(fmt...) \
51 do { \
52 if (fc_disc_debug) \
53 FC_DBG(fmt); \
54 } while (0)
55
56static void fc_disc_gpn_ft_req(struct fc_disc *);
57static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
58static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
59 struct fc_rport_identifiers *);
60static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
61static void fc_disc_done(struct fc_disc *);
62static void fc_disc_timeout(struct work_struct *);
63static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
64static void fc_disc_restart(struct fc_disc *);
65
66/**
67 * fc_disc_lookup_rport - lookup a remote port by port_id
68 * @lport: Fibre Channel host port instance
69 * @port_id: remote port port_id to match
70 */
71struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
72 u32 port_id)
73{
74 const struct fc_disc *disc = &lport->disc;
75 struct fc_rport *rport, *found = NULL;
76 struct fc_rport_libfc_priv *rdata;
77 int disc_found = 0;
78
79 list_for_each_entry(rdata, &disc->rports, peers) {
80 rport = PRIV_TO_RPORT(rdata);
81 if (rport->port_id == port_id) {
82 disc_found = 1;
83 found = rport;
84 break;
85 }
86 }
87
88 if (!disc_found)
89 found = NULL;
90
91 return found;
92}
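fc_disc_lookup_rport walks a linked list of private structures and maps each node back to its containing rport with PRIV_TO_RPORT, a container_of-style conversion. A self-contained userspace sketch of the same walk-and-convert idiom, with a hand-rolled circular list in place of the kernel's list_head:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct rport_priv { struct list_node peers; };
struct rport {
	unsigned int port_id;
	struct rport_priv priv;			/* embedded private data */
};

static struct rport *lookup(struct list_node *head, unsigned int port_id)
{
	struct list_node *n;

	for (n = head->next; n != head; n = n->next) {
		struct rport_priv *rdata =
			container_of(n, struct rport_priv, peers);
		struct rport *rport =
			container_of(rdata, struct rport, priv);
		if (rport->port_id == port_id)
			return rport;
	}
	return NULL;
}

int main(void)
{
	struct rport a = { .port_id = 0x010200 }, b = { .port_id = 0x010300 };
	struct list_node head = { &a.priv.peers };

	a.priv.peers.next = &b.priv.peers;
	b.priv.peers.next = &head;		/* circular, like list_head */

	struct rport *r = lookup(&head, 0x010300);
	printf("found %p port_id %x\n", (void *)r, r ? r->port_id : 0);
	return 0;
}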
93
94/**
95 * fc_disc_stop_rports - delete all the remote ports associated with the lport
96 * @disc: The discovery job to stop rports on
97 *
98 * Locking Note: This function expects that the lport mutex is locked before
99 * calling it.
100 */
101void fc_disc_stop_rports(struct fc_disc *disc)
102{
103 struct fc_lport *lport;
104 struct fc_rport *rport;
105 struct fc_rport_libfc_priv *rdata, *next;
106
107 lport = disc->lport;
108
109 mutex_lock(&disc->disc_mutex);
110 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
111 rport = PRIV_TO_RPORT(rdata);
112 list_del(&rdata->peers);
113 lport->tt.rport_logoff(rport);
114 }
115
116 mutex_unlock(&disc->disc_mutex);
117}
118
119/**
120 * fc_disc_rport_callback - Event handler for rport events
121 * @lport: The lport which is receiving the event
 122 * @rport: The rport which the event has occurred on
 123 * @event: The event that occurred
124 *
125 * Locking Note: The rport lock should not be held when calling
126 * this function.
127 */
128static void fc_disc_rport_callback(struct fc_lport *lport,
129 struct fc_rport *rport,
130 enum fc_rport_event event)
131{
132 struct fc_rport_libfc_priv *rdata = rport->dd_data;
133 struct fc_disc *disc = &lport->disc;
134 int found = 0;
135
136 FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
137 rport->port_id);
138
139 if (event == RPORT_EV_CREATED) {
140 if (disc) {
141 found = 1;
142 mutex_lock(&disc->disc_mutex);
143 list_add_tail(&rdata->peers, &disc->rports);
144 mutex_unlock(&disc->disc_mutex);
145 }
146 }
147
148 if (!found)
149 FC_DEBUG_DISC("The rport (%6x) is not maintained "
150 "by the discovery layer\n", rport->port_id);
151}
152
153/**
154 * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
155 * @sp: Current sequence of the RSCN exchange
156 * @fp: RSCN Frame
157 * @lport: Fibre Channel host port instance
158 *
159 * Locking Note: This function expects that the disc_mutex is locked
160 * before it is called.
161 */
162static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
163 struct fc_disc *disc)
164{
165 struct fc_lport *lport;
166 struct fc_rport *rport;
167 struct fc_rport_libfc_priv *rdata;
168 struct fc_els_rscn *rp;
169 struct fc_els_rscn_page *pp;
170 struct fc_seq_els_data rjt_data;
171 unsigned int len;
172 int redisc = 0;
173 enum fc_els_rscn_ev_qual ev_qual;
174 enum fc_els_rscn_addr_fmt fmt;
175 LIST_HEAD(disc_ports);
176 struct fc_disc_port *dp, *next;
177
178 lport = disc->lport;
179
180 FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
181 fc_host_port_id(lport->host));
182
183 /* make sure the frame contains an RSCN message */
184 rp = fc_frame_payload_get(fp, sizeof(*rp));
185 if (!rp)
186 goto reject;
187 /* make sure the page length is as expected (4 bytes) */
188 if (rp->rscn_page_len != sizeof(*pp))
189 goto reject;
190 /* get the RSCN payload length */
191 len = ntohs(rp->rscn_plen);
192 if (len < sizeof(*rp))
193 goto reject;
194 /* make sure the frame contains the expected payload */
195 rp = fc_frame_payload_get(fp, len);
196 if (!rp)
197 goto reject;
198 /* payload must be a multiple of the RSCN page size */
199 len -= sizeof(*rp);
200 if (len % sizeof(*pp))
201 goto reject;
202
203 for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
204 ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
205 ev_qual &= ELS_RSCN_EV_QUAL_MASK;
206 fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
207 fmt &= ELS_RSCN_ADDR_FMT_MASK;
208 /*
209 * if we get an address format other than port
210 * (area, domain, fabric), then do a full discovery
211 */
212 switch (fmt) {
213 case ELS_ADDR_FMT_PORT:
214 FC_DEBUG_DISC("Port address format for port (%6x)\n",
215 ntoh24(pp->rscn_fid));
216 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
217 if (!dp) {
218 redisc = 1;
219 break;
220 }
221 dp->lp = lport;
222 dp->ids.port_id = ntoh24(pp->rscn_fid);
223 dp->ids.port_name = -1;
224 dp->ids.node_name = -1;
225 dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
226 list_add_tail(&dp->peers, &disc_ports);
227 break;
228 case ELS_ADDR_FMT_AREA:
229 case ELS_ADDR_FMT_DOM:
230 case ELS_ADDR_FMT_FAB:
231 default:
232 FC_DEBUG_DISC("Address format is (%d)\n", fmt);
233 redisc = 1;
234 break;
235 }
236 }
237 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
238 if (redisc) {
239 FC_DEBUG_DISC("RSCN received: rediscovering\n");
240 fc_disc_restart(disc);
241 } else {
242 FC_DEBUG_DISC("RSCN received: not rediscovering. "
243 "redisc %d state %d in_prog %d\n",
244 redisc, lport->state, disc->pending);
245 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
246 list_del(&dp->peers);
247 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
248 if (rport) {
249 rdata = RPORT_TO_PRIV(rport);
250 list_del(&rdata->peers);
251 lport->tt.rport_logoff(rport);
252 }
253 fc_disc_single(disc, dp);
254 }
255 }
256 fc_frame_free(fp);
257 return;
258reject:
259 FC_DEBUG_DISC("Received a bad RSCN frame\n");
260 rjt_data.fp = NULL;
261 rjt_data.reason = ELS_RJT_LOGIC;
262 rjt_data.explan = ELS_EXPL_NONE;
263 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
264 fc_frame_free(fp);
265}
266
267/**
268 * fc_disc_recv_req - Handle incoming requests
269 * @sp: Current sequence of the request exchange
270 * @fp: The frame
271 * @lport: The FC local port
272 *
273 * Locking Note: This function is called from the EM and will lock
274 * the disc_mutex before calling the handler for the
275 * request.
276 */
277static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
278 struct fc_lport *lport)
279{
280 u8 op;
281 struct fc_disc *disc = &lport->disc;
282
283 op = fc_frame_payload_op(fp);
284 switch (op) {
285 case ELS_RSCN:
286 mutex_lock(&disc->disc_mutex);
287 fc_disc_recv_rscn_req(sp, fp, disc);
288 mutex_unlock(&disc->disc_mutex);
289 break;
290 default:
291 FC_DBG("Received an unsupported request. opcode (%x)\n", op);
292 break;
293 }
294}
295
296/**
297 * fc_disc_restart - Restart discovery
298 * @lport: FC discovery context
299 *
300 * Locking Note: This function expects that the disc mutex
301 * is already locked.
302 */
303static void fc_disc_restart(struct fc_disc *disc)
304{
305 struct fc_rport *rport;
306 struct fc_rport_libfc_priv *rdata, *next;
307 struct fc_lport *lport = disc->lport;
308
309 FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
310 fc_host_port_id(lport->host));
311
312 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
313 rport = PRIV_TO_RPORT(rdata);
314 FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
315 list_del(&rdata->peers);
316 lport->tt.rport_logoff(rport);
317 }
318
319 disc->requested = 1;
320 if (!disc->pending)
321 fc_disc_gpn_ft_req(disc);
322}
323
324/**
325 * fc_disc_start - Fibre Channel Target discovery
326 * @lport: FC local port
327 *
328 * Returns non-zero if discovery cannot be started.
329 */
330static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
331 enum fc_disc_event),
332 struct fc_lport *lport)
333{
334 struct fc_rport *rport;
335 struct fc_rport_identifiers ids;
336 struct fc_disc *disc = &lport->disc;
337
338 /*
339 * At this point we may have a new disc job or an existing
340 * one. Either way, let's lock when we make changes to it
341 * and send the GPN_FT request.
342 */
343 mutex_lock(&disc->disc_mutex);
344
345 disc->disc_callback = disc_callback;
346
347 /*
348 * If not ready, or already running discovery, just set request flag.
349 */
350 disc->requested = 1;
351
352 if (disc->pending) {
353 mutex_unlock(&disc->disc_mutex);
354 return;
355 }
356
357 /*
358 * Handle point-to-point mode as a simple discovery
359 * of the remote port. Yucky, yucky, yuck, yuck!
360 */
361 rport = disc->lport->ptp_rp;
362 if (rport) {
363 ids.port_id = rport->port_id;
364 ids.port_name = rport->port_name;
365 ids.node_name = rport->node_name;
366 ids.roles = FC_RPORT_ROLE_UNKNOWN;
367 get_device(&rport->dev);
368
369 if (!fc_disc_new_target(disc, rport, &ids)) {
370 disc->event = DISC_EV_SUCCESS;
371 fc_disc_done(disc);
372 }
373 put_device(&rport->dev);
374 } else {
375 fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
376 }
377
378 mutex_unlock(&disc->disc_mutex);
379}
380
381static struct fc_rport_operations fc_disc_rport_ops = {
382 .event_callback = fc_disc_rport_callback,
383};
384
385/**
386 * fc_disc_new_target - Handle new target found by discovery
387 * @lport: FC local port
388 * @rport: The previous FC remote port (NULL if new remote port)
389 * @ids: Identifiers for the new FC remote port
390 *
391 * Locking Note: This function expects that the disc_mutex is locked
392 * before it is called.
393 */
394static int fc_disc_new_target(struct fc_disc *disc,
395 struct fc_rport *rport,
396 struct fc_rport_identifiers *ids)
397{
398 struct fc_lport *lport = disc->lport;
399 struct fc_rport_libfc_priv *rp;
400 int error = 0;
401
402 if (rport && ids->port_name) {
403 if (rport->port_name == -1) {
404 /*
405 * Set WWN and fall through to notify of create.
406 */
407 fc_rport_set_name(rport, ids->port_name,
408 rport->node_name);
409 } else if (rport->port_name != ids->port_name) {
410 /*
411 * This is a new port with the same FCID as
412 * a previously-discovered port. Presumably the old
413 * port logged out and a new port logged in and was
414 * assigned the same FCID. This should be rare.
415 * Delete the old one and fall thru to re-create.
416 */
417 fc_disc_del_target(disc, rport);
418 rport = NULL;
419 }
420 }
421 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
422 ids->port_id != fc_host_port_id(lport->host) &&
423 ids->port_name != lport->wwpn) {
424 if (!rport) {
425 rport = lport->tt.rport_lookup(lport, ids->port_id);
426 if (!rport) {
427 struct fc_disc_port dp;
428 dp.lp = lport;
429 dp.ids.port_id = ids->port_id;
430 dp.ids.port_name = ids->port_name;
431 dp.ids.node_name = ids->node_name;
432 dp.ids.roles = ids->roles;
433 rport = fc_rport_rogue_create(&dp);
434 }
435 if (!rport)
436 error = -ENOMEM;
437 }
438 if (rport) {
439 rp = rport->dd_data;
440 rp->ops = &fc_disc_rport_ops;
441 rp->rp_state = RPORT_ST_INIT;
442 lport->tt.rport_login(rport);
443 }
444 }
445 return error;
446}
447
448/**
449 * fc_disc_del_target - Delete a target
450 * @disc: FC discovery context
451 * @rport: The remote port to be removed
452 */
453static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
454{
455 struct fc_lport *lport = disc->lport;
456 struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
457 list_del(&rdata->peers);
458 lport->tt.rport_logoff(rport);
459}
460
461/**
462 * fc_disc_done - Discovery has been completed
463 * @disc: FC discovery context
464 */
465static void fc_disc_done(struct fc_disc *disc)
466{
467 struct fc_lport *lport = disc->lport;
468
469 FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
470 fc_host_port_id(lport->host));
471
472 disc->disc_callback(lport, disc->event);
473 disc->event = DISC_EV_NONE;
474
475 if (disc->requested)
476 fc_disc_gpn_ft_req(disc);
477 else
478 disc->pending = 0;
479}
480
481/**
482 * fc_disc_error - Handle error on dNS request
483 * @disc: FC discovery context
484 * @fp: The frame pointer
485 */
486static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
487{
488 struct fc_lport *lport = disc->lport;
489 unsigned long delay = 0;
490 if (fc_disc_debug)
491 FC_DBG("Error %ld, retries %d/%d\n",
492 PTR_ERR(fp), disc->retry_count,
493 FC_DISC_RETRY_LIMIT);
494
495 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
496 /*
497 * Memory allocation failure, or the exchange timed out,
498 * retry after delay.
499 */
500 if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
501 /* go ahead and retry */
502 if (!fp)
503 delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
504 else {
505 delay = msecs_to_jiffies(lport->e_d_tov);
506
507 /* timeout faster first time */
508 if (!disc->retry_count)
509 delay /= 4;
510 }
511 disc->retry_count++;
512 schedule_delayed_work(&disc->disc_work, delay);
513 } else {
514 /* exceeded retries */
515 disc->event = DISC_EV_FAILED;
516 fc_disc_done(disc);
517 }
518 }
519}
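fc_disc_error retries a failed GPN_FT up to FC_DISC_RETRY_LIMIT times, using a short fixed delay after frame-allocation failures and an e_d_tov-based delay (quartered on the first retry) after exchange timeouts. A sketch of that delay-selection logic, reusing the constants from this file:

#include <stdio.h>

#define FC_DISC_RETRY_LIMIT	3		/* max retries */
#define FC_DISC_RETRY_DELAY	500UL		/* msecs, for alloc failures */

/* returns the delay in msecs before the next retry, or -1 to give up */
static long next_retry_delay(int alloc_failed, unsigned long e_d_tov,
			     int retry_count)
{
	unsigned long delay;

	if (retry_count >= FC_DISC_RETRY_LIMIT)
		return -1;			/* DISC_EV_FAILED */
	if (alloc_failed)
		delay = FC_DISC_RETRY_DELAY;
	else {
		delay = e_d_tov;
		if (retry_count == 0)		/* time out faster first time */
			delay /= 4;
	}
	return (long)delay;
}

int main(void)
{
	int i;

	for (i = 0; i <= FC_DISC_RETRY_LIMIT; i++)
		printf("retry %d: delay %ld ms\n", i,
		       next_retry_delay(0, 2000, i));
	return 0;
}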
520
521/**
522 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
523 * @lport: FC discovery context
524 *
525 * Locking Note: This function expects that the disc_mutex is locked
526 * before it is called.
527 */
528static void fc_disc_gpn_ft_req(struct fc_disc *disc)
529{
530 struct fc_frame *fp;
531 struct fc_lport *lport = disc->lport;
532
533 WARN_ON(!fc_lport_test_ready(lport));
534
535 disc->pending = 1;
536 disc->requested = 0;
537
538 disc->buf_len = 0;
539 disc->seq_count = 0;
540 fp = fc_frame_alloc(lport,
541 sizeof(struct fc_ct_hdr) +
542 sizeof(struct fc_ns_gid_ft));
543 if (!fp)
544 goto err;
545
546 if (lport->tt.elsct_send(lport, NULL, fp,
547 FC_NS_GPN_FT,
548 fc_disc_gpn_ft_resp,
549 disc, lport->e_d_tov))
550 return;
551err:
552 fc_disc_error(disc, fp);
553}
554
555/**
556 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
557 * @lport: Fibre Channel host port instance
558 * @buf: GPN_FT response buffer
559 * @len: size of response buffer
560 */
561static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
562{
563 struct fc_lport *lport;
564 struct fc_gpn_ft_resp *np;
565 char *bp;
566 size_t plen;
567 size_t tlen;
568 int error = 0;
569 struct fc_disc_port dp;
570 struct fc_rport *rport;
571 struct fc_rport_libfc_priv *rdata;
572
573 lport = disc->lport;
574
575 /*
576 * Handle partial name record left over from previous call.
577 */
578 bp = buf;
579 plen = len;
580 np = (struct fc_gpn_ft_resp *)bp;
581 tlen = disc->buf_len;
582 if (tlen) {
583 WARN_ON(tlen >= sizeof(*np));
584 plen = sizeof(*np) - tlen;
 585 		WARN_ON(plen == 0);
586 WARN_ON(plen >= sizeof(*np));
587 if (plen > len)
588 plen = len;
589 np = &disc->partial_buf;
590 memcpy((char *)np + tlen, bp, plen);
591
592 /*
593 * Set bp so that the loop below will advance it to the
594 * first valid full name element.
595 */
596 bp -= tlen;
597 len += tlen;
598 plen += tlen;
599 disc->buf_len = (unsigned char) plen;
600 if (plen == sizeof(*np))
601 disc->buf_len = 0;
602 }
603
604 /*
605 * Handle full name records, including the one filled from above.
606 * Normally, np == bp and plen == len, but from the partial case above,
607 * bp, len describe the overall buffer, and np, plen describe the
 608	 * partial buffer, which would usually be full by now.
609 * After the first time through the loop, things return to "normal".
610 */
611 while (plen >= sizeof(*np)) {
612 dp.lp = lport;
613 dp.ids.port_id = ntoh24(np->fp_fid);
614 dp.ids.port_name = ntohll(np->fp_wwpn);
615 dp.ids.node_name = -1;
616 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
617
618 if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
619 (dp.ids.port_name != lport->wwpn)) {
620 rport = fc_rport_rogue_create(&dp);
621 if (rport) {
622 rdata = rport->dd_data;
623 rdata->ops = &fc_disc_rport_ops;
624 rdata->local_port = lport;
625 lport->tt.rport_login(rport);
626 } else
627 FC_DBG("Failed to allocate memory for "
628 "the newly discovered port (%6x)\n",
629 dp.ids.port_id);
630 }
631
632 if (np->fp_flags & FC_NS_FID_LAST) {
633 disc->event = DISC_EV_SUCCESS;
634 fc_disc_done(disc);
635 len = 0;
636 break;
637 }
638 len -= sizeof(*np);
639 bp += sizeof(*np);
640 np = (struct fc_gpn_ft_resp *)bp;
641 plen = len;
642 }
643
644 /*
645 * Save any partial record at the end of the buffer for next time.
646 */
647 if (error == 0 && len > 0 && len < sizeof(*np)) {
648 if (np != &disc->partial_buf) {
649 FC_DEBUG_DISC("Partial buffer remains "
650 "for discovery by (%6x)\n",
651 fc_host_port_id(lport->host));
652 memcpy(&disc->partial_buf, np, len);
653 }
654 disc->buf_len = (unsigned char) len;
655 } else {
656 disc->buf_len = 0;
657 }
658 return error;
659}
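The parser above must cope with a fixed-size name record being split across two response frames: any tail fragment is copied into disc->partial_buf and completed from the front of the next buffer. A standalone sketch of that carry-over technique for 8-byte records:

#include <stdio.h>
#include <string.h>

#define REC_SIZE 8

static unsigned char partial[REC_SIZE];
static size_t partial_len;

static void handle_record(const unsigned char *rec)
{
	printf("record: %.8s\n", rec);
}

/* feed one buffer, carrying any trailing fragment to the next call */
static void parse(const unsigned char *buf, size_t len)
{
	/* first, try to complete a record left over from last time */
	if (partial_len) {
		size_t need = REC_SIZE - partial_len;
		if (need > len)
			need = len;
		memcpy(partial + partial_len, buf, need);
		partial_len += need;
		buf += need;
		len -= need;
		if (partial_len < REC_SIZE)
			return;			/* still incomplete */
		handle_record(partial);
		partial_len = 0;
	}
	/* then handle whole records in place */
	for (; len >= REC_SIZE; buf += REC_SIZE, len -= REC_SIZE)
		handle_record(buf);
	/* finally, stash the tail fragment, if any */
	memcpy(partial, buf, len);
	partial_len = len;
}

int main(void)
{
	const unsigned char data[] = "AAAAAAAABBBBBBBBCCCCCCCC";

	parse(data, 5);				/* splits the first record */
	parse(data + 5, sizeof(data) - 1 - 5);	/* completes it, then the rest */
	return 0;
}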
660
661/*
662 * Handle retry of memory allocation for remote ports.
663 */
664static void fc_disc_timeout(struct work_struct *work)
665{
666 struct fc_disc *disc = container_of(work,
667 struct fc_disc,
668 disc_work.work);
669 mutex_lock(&disc->disc_mutex);
670 if (disc->requested && !disc->pending)
671 fc_disc_gpn_ft_req(disc);
672 mutex_unlock(&disc->disc_mutex);
673}
674
675/**
676 * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
677 * @sp: Current sequence of GPN_FT exchange
678 * @fp: response frame
679 * @lp_arg: Fibre Channel host port instance
680 *
681 * Locking Note: This function expects that the disc_mutex is locked
682 * before it is called.
683 */
684static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
685 void *disc_arg)
686{
687 struct fc_disc *disc = disc_arg;
688 struct fc_ct_hdr *cp;
689 struct fc_frame_header *fh;
690 unsigned int seq_cnt;
691 void *buf = NULL;
692 unsigned int len;
693 int error;
694
695 FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
696 fc_host_port_id(disc->lport->host));
697
698 if (IS_ERR(fp)) {
699 fc_disc_error(disc, fp);
700 return;
701 }
702
703 WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
704 fh = fc_frame_header_get(fp);
705 len = fr_len(fp) - sizeof(*fh);
706 seq_cnt = ntohs(fh->fh_seq_cnt);
707 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
708 disc->seq_count == 0) {
709 cp = fc_frame_payload_get(fp, sizeof(*cp));
710 if (!cp) {
711 FC_DBG("GPN_FT response too short, len %d\n",
712 fr_len(fp));
713 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
714
715 /*
716 * Accepted. Parse response.
717 */
718 buf = cp + 1;
719 len -= sizeof(*cp);
720 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
721 FC_DBG("GPN_FT rejected reason %x exp %x "
722 "(check zoning)\n", cp->ct_reason,
723 cp->ct_explan);
724 disc->event = DISC_EV_FAILED;
725 fc_disc_done(disc);
726 } else {
727 FC_DBG("GPN_FT unexpected response code %x\n",
728 ntohs(cp->ct_cmd));
729 }
730 } else if (fr_sof(fp) == FC_SOF_N3 &&
731 seq_cnt == disc->seq_count) {
732 buf = fh + 1;
733 } else {
734 FC_DBG("GPN_FT unexpected frame - out of sequence? "
735 "seq_cnt %x expected %x sof %x eof %x\n",
736 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
737 }
738 if (buf) {
739 error = fc_disc_gpn_ft_parse(disc, buf, len);
740 if (error)
741 fc_disc_error(disc, fp);
742 else
743 disc->seq_count++;
744 }
745 fc_frame_free(fp);
746}
747
748/**
749 * fc_disc_single - Discover the directory information for a single target
750 * @lport: FC local port
751 * @dp: The port to rediscover
752 *
753 * Locking Note: This function expects that the disc_mutex is locked
754 * before it is called.
755 */
756static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
757{
758 struct fc_lport *lport;
759 struct fc_rport *rport;
760 struct fc_rport *new_rport;
761 struct fc_rport_libfc_priv *rdata;
762
763 lport = disc->lport;
764
765 if (dp->ids.port_id == fc_host_port_id(lport->host))
766 goto out;
767
768 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
769 if (rport)
770 fc_disc_del_target(disc, rport);
771
772 new_rport = fc_rport_rogue_create(dp);
773 if (new_rport) {
774 rdata = new_rport->dd_data;
775 rdata->ops = &fc_disc_rport_ops;
776 kfree(dp);
777 lport->tt.rport_login(new_rport);
778 }
779 return;
780out:
781 kfree(dp);
782}
783
784/**
785 * fc_disc_stop - Stop discovery for a given lport
786 * @lport: The lport that discovery should stop for
787 */
788void fc_disc_stop(struct fc_lport *lport)
789{
790 struct fc_disc *disc = &lport->disc;
791
792 if (disc) {
793 cancel_delayed_work_sync(&disc->disc_work);
794 fc_disc_stop_rports(disc);
795 }
796}
797
798/**
799 * fc_disc_stop_final - Stop discovery for a given lport
800 * @lport: The lport that discovery should stop for
801 *
802 * This function will block until discovery has been
803 * completely stopped and all rports have been deleted.
804 */
805void fc_disc_stop_final(struct fc_lport *lport)
806{
807 fc_disc_stop(lport);
808 lport->tt.rport_flush_queue();
809}
810
811/**
812 * fc_disc_init - Initialize the discovery block
813 * @lport: FC local port
814 */
815int fc_disc_init(struct fc_lport *lport)
816{
817 struct fc_disc *disc;
818
819 if (!lport->tt.disc_start)
820 lport->tt.disc_start = fc_disc_start;
821
822 if (!lport->tt.disc_stop)
823 lport->tt.disc_stop = fc_disc_stop;
824
825 if (!lport->tt.disc_stop_final)
826 lport->tt.disc_stop_final = fc_disc_stop_final;
827
828 if (!lport->tt.disc_recv_req)
829 lport->tt.disc_recv_req = fc_disc_recv_req;
830
831 if (!lport->tt.rport_lookup)
832 lport->tt.rport_lookup = fc_disc_lookup_rport;
833
834 disc = &lport->disc;
835 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
836 mutex_init(&disc->disc_mutex);
837 INIT_LIST_HEAD(&disc->rports);
838
839 disc->lport = lport;
840 disc->delay = FC_DISC_DELAY;
841 disc->event = DISC_EV_NONE;
842
843 return 0;
844}
845EXPORT_SYMBOL(fc_disc_init);
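fc_disc_init, like fc_elsct_init below, only installs its handlers into the lport template when the LLD has not already provided its own, so a driver can override any individual hook. A small sketch of this fill-in-the-blanks initialization pattern:

#include <stdio.h>

struct template {
	void (*disc_start)(void);
	void (*disc_stop)(void);
};

static void default_start(void) { printf("default start\n"); }
static void default_stop(void)  { printf("default stop\n");  }
static void lld_start(void)     { printf("LLD start\n");     }

/* library init: keep LLD overrides, fill the rest with defaults */
static void disc_init(struct template *tt)
{
	if (!tt->disc_start)
		tt->disc_start = default_start;
	if (!tt->disc_stop)
		tt->disc_stop = default_stop;
}

int main(void)
{
	struct template tt = { .disc_start = lld_start }; /* partial override */

	disc_init(&tt);
	tt.disc_start();	/* LLD start */
	tt.disc_stop();		/* default stop */
	return 0;
}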
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000000..dd47fe619d1e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright(c) 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Provide interface to send ELS/CT FC frames
22 */
23
24#include <asm/unaligned.h>
25#include <scsi/fc/fc_gs.h>
26#include <scsi/fc/fc_ns.h>
27#include <scsi/fc/fc_els.h>
28#include <scsi/libfc.h>
29#include <scsi/fc_encode.h>
30
31/*
32 * fc_elsct_send - sends ELS/CT frame
33 */
34static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
35 struct fc_rport *rport,
36 struct fc_frame *fp,
37 unsigned int op,
38 void (*resp)(struct fc_seq *,
39 struct fc_frame *fp,
40 void *arg),
41 void *arg, u32 timer_msec)
42{
43 enum fc_rctl r_ctl;
44 u32 did;
45 enum fc_fh_type fh_type;
46 int rc;
47
48 /* ELS requests */
49 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
50 rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
51 else
52 /* CT requests */
53 rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
54
55 if (rc)
56 return NULL;
57
58 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
59 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
60
61 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
62}
63
64int fc_elsct_init(struct fc_lport *lport)
65{
66 if (!lport->tt.elsct_send)
67 lport->tt.elsct_send = fc_elsct_send;
68
69 return 0;
70}
71EXPORT_SYMBOL(fc_elsct_init);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
35#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
36
37/*
38 * fc_exch_debug can be set in debugger or at compile time to get more logs.
39 */
40static int fc_exch_debug;
41
42#define FC_DEBUG_EXCH(fmt...) \
43 do { \
44 if (fc_exch_debug) \
45 FC_DBG(fmt); \
46 } while (0)
47
48static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
49
50/*
51 * Structure and function definitions for managing Fibre Channel Exchanges
52 * and Sequences.
53 *
54 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
55 *
56 * fc_exch_mgr holds the exchange state for an N port
57 *
58 * fc_exch holds state for one exchange and links to its active sequence.
59 *
60 * fc_seq holds the state for an individual sequence.
61 */
62
63/*
64 * Exchange manager.
65 *
66 * This structure is the center for creating exchanges and sequences.
67 * It manages the allocation of exchange IDs.
68 */
69struct fc_exch_mgr {
70 enum fc_class class; /* default class for sequences */
71 spinlock_t em_lock; /* exchange manager lock,
72 must be taken before ex_lock */
73 u16 last_xid; /* last allocated exchange ID */
74 u16 min_xid; /* min exchange ID */
75 u16 max_xid; /* max exchange ID */
76 u16 max_read; /* max exchange ID for read */
77 u16 last_read; /* last xid allocated for read */
78 u32 total_exches; /* total allocated exchanges */
79 struct list_head ex_list; /* allocated exchanges list */
80 struct fc_lport *lp; /* fc device instance */
81 mempool_t *ep_pool; /* reserve ep's */
82
83 /*
84 * currently exchange mgr stats are updated but not used.
85 * either stats can be expose via sysfs or remove them
86 * all together if not used XXX
87 */
88 struct {
89 atomic_t no_free_exch;
90 atomic_t no_free_exch_xid;
91 atomic_t xid_not_found;
92 atomic_t xid_busy;
93 atomic_t seq_not_found;
94 atomic_t non_bls_resp;
95 } stats;
96 struct fc_exch **exches; /* for exch pointers indexed by xid */
97};
98#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
99
100static void fc_exch_rrq(struct fc_exch *);
101static void fc_seq_ls_acc(struct fc_seq *);
102static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
103 enum fc_els_rjt_explan);
104static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
105static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
106static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
107
108/*
109 * Internal implementation notes.
110 *
 111 * There is one exchange manager by default in libfc, but an LLD may
 112 * choose to have one per CPU. There is one sequence manager per
 113 * exchange manager, and the two are currently never separated.
114 *
115 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
116 * assigned by the Sequence Initiator that shall be unique for a specific
117 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
118 * qualified by exchange ID, which one might think it would be.
119 * In practice this limits the number of open sequences and exchanges to 256
120 * per session. For most targets we could treat this limit as per exchange.
121 *
122 * The exchange and its sequence are freed when the last sequence is received.
123 * It's possible for the remote port to leave an exchange open without
124 * sending any sequences.
125 *
126 * Notes on reference counts:
127 *
128 * Exchanges are reference counted and exchange gets freed when the reference
129 * count becomes zero.
130 *
131 * Timeouts:
132 * Sequences are timed out for E_D_TOV and R_A_TOV.
133 *
134 * Sequence event handling:
135 *
136 * The following events may occur on initiator sequences:
137 *
138 * Send.
139 * For now, the whole thing is sent.
140 * Receive ACK
141 * This applies only to class F.
142 * The sequence is marked complete.
143 * ULP completion.
144 * The upper layer calls fc_exch_done() when done
145 * with exchange and sequence tuple.
146 * RX-inferred completion.
147 * When we receive the next sequence on the same exchange, we can
148 * retire the previous sequence ID. (XXX not implemented).
149 * Timeout.
150 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
151 * E_D_TOV causes abort and calls upper layer response handler
152 * with FC_EX_TIMEOUT error.
153 * Receive RJT
154 * XXX defer.
155 * Send ABTS
156 * On timeout.
157 *
158 * The following events may occur on recipient sequences:
159 *
160 * Receive
161 * Allocate sequence for first frame received.
162 * Hold during receive handler.
163 * Release when final frame received.
164 * Keep status of last N of these for the ELS RES command. XXX TBD.
165 * Receive ABTS
166 * Deallocate sequence
167 * Send RJT
168 * Deallocate
169 *
170 * For now, we neglect conditions where only part of a sequence was
171 * received or transmitted, or where out-of-order receipt is detected.
172 */
173
174/*
175 * Locking notes:
176 *
 177 * The EM code runs in a per-CPU worker thread.
178 *
179 * To protect against concurrency between a worker thread code and timers,
180 * sequence allocation and deallocation must be locked.
 181 *  - exchange refcnt can be done atomically without locks.
182 * - sequence allocation must be locked by exch lock.
183 * - If the em_lock and ex_lock must be taken at the same time, then the
184 * em_lock must be taken before the ex_lock.
185 */
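The locking notes fix a strict order: when both locks are needed, em_lock is taken before ex_lock. A two-mutex pthread sketch of why a single global ordering prevents deadlock (illustrative names, not the kernel spinlock API):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t em_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

/* every path that needs both locks takes em_lock first, ex_lock second;
 * because no thread ever holds ex_lock while waiting for em_lock, the
 * circular wait needed for a deadlock cannot form */
static void *worker(void *arg)
{
	int i;

	for (i = 0; i < 100000; i++) {
		pthread_mutex_lock(&em_lock);
		pthread_mutex_lock(&ex_lock);
		pthread_mutex_unlock(&ex_lock);
		pthread_mutex_unlock(&em_lock);
	}
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("done, no deadlock\n");
	return 0;
}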
186
187/*
188 * opcode names for debugging.
189 */
190static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
191
192#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
193
194static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
195 unsigned int max_index)
196{
197 const char *name = NULL;
198
199 if (op < max_index)
200 name = table[op];
201 if (!name)
202 name = "unknown";
203 return name;
204}
205
206static const char *fc_exch_rctl_name(unsigned int op)
207{
208 return fc_exch_name_lookup(op, fc_exch_rctl_names,
209 FC_TABLE_SIZE(fc_exch_rctl_names));
210}
211
212/*
213 * Hold an exchange - keep it from being freed.
214 */
215static void fc_exch_hold(struct fc_exch *ep)
216{
217 atomic_inc(&ep->ex_refcnt);
218}
219
220/*
 221 * Set up the FC header by initializing a few more header fields and
 222 * sof/eof. Fields initialized by this function:
223 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
224 * - sof and eof
225 */
226static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
227 u32 f_ctl)
228{
229 struct fc_frame_header *fh = fc_frame_header_get(fp);
230 u16 fill;
231
232 fr_sof(fp) = ep->class;
233 if (ep->seq.cnt)
234 fr_sof(fp) = fc_sof_normal(ep->class);
235
236 if (f_ctl & FC_FC_END_SEQ) {
237 fr_eof(fp) = FC_EOF_T;
238 if (fc_sof_needs_ack(ep->class))
239 fr_eof(fp) = FC_EOF_N;
240 /*
241 * Form f_ctl.
242 * The number of fill bytes to make the length a 4-byte
243 * multiple is the low order 2-bits of the f_ctl.
244 * The fill itself will have been cleared by the frame
245 * allocation.
246 * After this, the length will be even, as expected by
247 * the transport.
248 */
249 fill = fr_len(fp) & 3;
250 if (fill) {
251 fill = 4 - fill;
252 /* TODO, this may be a problem with fragmented skb */
253 skb_put(fp_skb(fp), fill);
254 hton24(fh->fh_f_ctl, f_ctl | fill);
255 }
256 } else {
257 WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
258 fr_eof(fp) = FC_EOF_N;
259 }
260
261 /*
 262	 * Initialize remaining fh fields
263 * from fc_fill_fc_hdr
264 */
265 fh->fh_ox_id = htons(ep->oxid);
266 fh->fh_rx_id = htons(ep->rxid);
267 fh->fh_seq_id = ep->seq.id;
268 fh->fh_seq_cnt = htons(ep->seq.cnt);
269}
270
271
272/*
273 * Release a reference to an exchange.
274 * If the refcnt goes to zero and the exchange is complete, it is freed.
275 */
276static void fc_exch_release(struct fc_exch *ep)
277{
278 struct fc_exch_mgr *mp;
279
280 if (atomic_dec_and_test(&ep->ex_refcnt)) {
281 mp = ep->em;
282 if (ep->destructor)
283 ep->destructor(&ep->seq, ep->arg);
284 if (ep->lp->tt.exch_put)
285 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
 286 		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
287 mempool_free(ep, mp->ep_pool);
288 }
289}
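fc_exch_hold and fc_exch_release implement the usual hold/put discipline: every holder increments the count, and whoever drops it to zero frees the object. A C11 stdatomic sketch of the same lifetime rule (illustrative, not the kernel atomic_t API):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct exch {
	atomic_int refcnt;
	int xid;
};

static struct exch *exch_alloc(int xid)
{
	struct exch *ep = malloc(sizeof(*ep));
	if (ep) {
		atomic_init(&ep->refcnt, 1);	/* caller's reference */
		ep->xid = xid;
	}
	return ep;
}

static void exch_hold(struct exch *ep)
{
	atomic_fetch_add(&ep->refcnt, 1);
}

static void exch_release(struct exch *ep)
{
	/* free only when the last reference is dropped */
	if (atomic_fetch_sub(&ep->refcnt, 1) == 1) {
		printf("freeing xid %x\n", ep->xid);
		free(ep);
	}
}

int main(void)
{
	struct exch *ep = exch_alloc(0x10);

	exch_hold(ep);		/* e.g. hold for a pending timer */
	exch_release(ep);	/* timer fired */
	exch_release(ep);	/* original reference: frees here */
	return 0;
}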
290
291static int fc_exch_done_locked(struct fc_exch *ep)
292{
293 int rc = 1;
294
295 /*
296 * We must check for completion in case there are two threads
 297	 * trying to complete this. But the rrq code will reuse the
298 * ep, and in that case we only clear the resp and set it as
299 * complete, so it can be reused by the timer to send the rrq.
300 */
301 ep->resp = NULL;
302 if (ep->state & FC_EX_DONE)
303 return rc;
304 ep->esb_stat |= ESB_ST_COMPLETE;
305
306 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
307 ep->state |= FC_EX_DONE;
308 if (cancel_delayed_work(&ep->timeout_work))
309 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
310 rc = 0;
311 }
312 return rc;
313}
314
315static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
316{
317 struct fc_exch_mgr *mp;
318
319 mp = ep->em;
320 spin_lock_bh(&mp->em_lock);
321 WARN_ON(mp->total_exches <= 0);
322 mp->total_exches--;
323 mp->exches[ep->xid - mp->min_xid] = NULL;
324 list_del(&ep->ex_list);
325 spin_unlock_bh(&mp->em_lock);
326 fc_exch_release(ep); /* drop hold for exch in mp */
327}
328
329/*
330 * Internal version of fc_exch_timer_set - used with lock held.
331 */
332static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
333 unsigned int timer_msec)
334{
335 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
336 return;
337
338 FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
339 ep->xid);
340 if (schedule_delayed_work(&ep->timeout_work,
341 msecs_to_jiffies(timer_msec)))
342 fc_exch_hold(ep); /* hold for timer */
343}
344
345/*
346 * Set timer for an exchange.
347 * The time is a minimum delay in milliseconds until the timer fires.
348 * Used for upper level protocols to time out the exchange.
349 * The timer is cancelled when it fires or when the exchange completes.
351 */
352static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
353{
354 spin_lock_bh(&ep->ex_lock);
355 fc_exch_timer_set_locked(ep, timer_msec);
356 spin_unlock_bh(&ep->ex_lock);
357}
358
359int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
360{
361 struct fc_seq *sp;
362 struct fc_exch *ep;
363 struct fc_frame *fp;
364 int error;
365
366 ep = fc_seq_exch(req_sp);
367
368 spin_lock_bh(&ep->ex_lock);
369 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
370 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
371 spin_unlock_bh(&ep->ex_lock);
372 return -ENXIO;
373 }
374
375 /*
376 * Send the abort on a new sequence if possible.
377 */
378 sp = fc_seq_start_next_locked(&ep->seq);
379 if (!sp) {
380 spin_unlock_bh(&ep->ex_lock);
381 return -ENOMEM;
382 }
383
384 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
385 if (timer_msec)
386 fc_exch_timer_set_locked(ep, timer_msec);
387 spin_unlock_bh(&ep->ex_lock);
388
389 /*
390 * If not logged into the fabric, don't send ABTS but leave
391 * sequence active until next timeout.
392 */
393 if (!ep->sid)
394 return 0;
395
396 /*
397 * Send an abort for the sequence that timed out.
398 */
399 fp = fc_frame_alloc(ep->lp, 0);
400 if (fp) {
401 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
402 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
403 error = fc_seq_send(ep->lp, sp, fp);
404 } else
405 error = -ENOBUFS;
406 return error;
407}
408EXPORT_SYMBOL(fc_seq_exch_abort);
409
410/*
411 * Exchange timeout - handle exchange timer expiration.
412 * The timer will have been cancelled before this is called.
413 */
414static void fc_exch_timeout(struct work_struct *work)
415{
416 struct fc_exch *ep = container_of(work, struct fc_exch,
417 timeout_work.work);
418 struct fc_seq *sp = &ep->seq;
419 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
420 void *arg;
421 u32 e_stat;
422 int rc = 1;
423
424 spin_lock_bh(&ep->ex_lock);
425 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
426 goto unlock;
427
428 e_stat = ep->esb_stat;
429 if (e_stat & ESB_ST_COMPLETE) {
430 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
431 if (e_stat & ESB_ST_REC_QUAL)
432 fc_exch_rrq(ep);
433 spin_unlock_bh(&ep->ex_lock);
434 goto done;
435 } else {
436 resp = ep->resp;
437 arg = ep->arg;
438 ep->resp = NULL;
439 if (e_stat & ESB_ST_ABNORMAL)
440 rc = fc_exch_done_locked(ep);
441 spin_unlock_bh(&ep->ex_lock);
442 if (!rc)
443 fc_exch_mgr_delete_ep(ep);
444 if (resp)
445 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
446 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
447 goto done;
448 }
449unlock:
450 spin_unlock_bh(&ep->ex_lock);
451done:
452 /*
453 * This release matches the hold taken when the timer was set.
454 */
455 fc_exch_release(ep);
456}
457
458/*
459 * Allocate a sequence.
460 *
461 * We don't support multiple originated sequences on the same exchange.
462 * By implication, any previously originated sequence on this exchange
463 * is complete, and we reallocate the same sequence.
464 */
465static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
466{
467 struct fc_seq *sp;
468
469 sp = &ep->seq;
470 sp->ssb_stat = 0;
471 sp->cnt = 0;
472 sp->id = seq_id;
473 return sp;
474}
475
476/*
477 * fc_em_alloc_xid - returns an xid based on request type
478 * @lp : ptr to associated lport
479 * @fp : ptr to the assocated frame
480 *
 481 * Check the associated fc_fsp_pkt to get the scsi command type and
 482 * direction, and decide the range from which this exchange id
 483 * will be allocated.
484 *
 485 * Returns : 0 or a valid xid
486 */
487static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
488{
489 u16 xid, min, max;
490 u16 *plast;
491 struct fc_exch *ep = NULL;
492
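	/* with LRO enabled a read-XID cap is set: reads draw xids from
	 * [min_xid, max_read] and all other requests from (max_read, max_xid]
	 */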
493 if (mp->max_read) {
494 if (fc_frame_is_read(fp)) {
495 min = mp->min_xid;
496 max = mp->max_read;
497 plast = &mp->last_read;
498 } else {
499 min = mp->max_read + 1;
500 max = mp->max_xid;
501 plast = &mp->last_xid;
502 }
503 } else {
504 min = mp->min_xid;
505 max = mp->max_xid;
506 plast = &mp->last_xid;
507 }
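	/* scan the chosen range circularly, starting just past the last
	 * xid handed out, until a free slot is found or we wrap around
	 */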
508 xid = *plast;
509 do {
510 xid = (xid == max) ? min : xid + 1;
511 ep = mp->exches[xid - mp->min_xid];
512 } while ((ep != NULL) && (xid != *plast));
513
514 if (unlikely(ep))
515 xid = 0;
516 else
517 *plast = xid;
518
519 return xid;
520}
521
522/*
523 * fc_exch_alloc - allocate an exchange.
524 * @mp : ptr to the exchange manager
525 * @xid: input xid
526 *
527 * If the supplied xid is zero, assign the next free exchange ID
528 * from the exchange manager; otherwise use the supplied xid.
529 * Returns with exch lock held.
530 */
531struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
532 struct fc_frame *fp, u16 xid)
533{
534 struct fc_exch *ep;
535
536 /* allocate memory for exchange */
537 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
538 if (!ep) {
539 atomic_inc(&mp->stats.no_free_exch);
540 goto out;
541 }
542 memset(ep, 0, sizeof(*ep));
543
544 spin_lock_bh(&mp->em_lock);
545 /* alloc xid if input xid 0 */
546 if (!xid) {
547 /* alloc a new xid */
548 xid = fc_em_alloc_xid(mp, fp);
549 if (!xid) {
550 printk(KERN_ERR "fc_em_alloc_xid() failed\n");
551 goto err;
552 }
553 }
554
555 fc_exch_hold(ep); /* hold for exch in mp */
556 spin_lock_init(&ep->ex_lock);
557 /*
558 * Hold exch lock for caller to prevent fc_exch_reset()
559 * from releasing exch while fc_exch_alloc() caller is
560 * still working on exch.
561 */
562 spin_lock_bh(&ep->ex_lock);
563
564 mp->exches[xid - mp->min_xid] = ep;
565 list_add_tail(&ep->ex_list, &mp->ex_list);
566 fc_seq_alloc(ep, ep->seq_id++);
567 mp->total_exches++;
568 spin_unlock_bh(&mp->em_lock);
569
570 /*
571 * update exchange
572 */
573 ep->oxid = ep->xid = xid;
574 ep->em = mp;
575 ep->lp = mp->lp;
576 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
577 ep->rxid = FC_XID_UNKNOWN;
578 ep->class = mp->class;
579 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
580out:
581 return ep;
582err:
583 spin_unlock_bh(&mp->em_lock);
584 atomic_inc(&mp->stats.no_free_exch_xid);
585 mempool_free(ep, mp->ep_pool);
586 return NULL;
587}
588EXPORT_SYMBOL(fc_exch_alloc);
589
590/*
591 * Lookup and hold an exchange.
592 */
593static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
594{
595 struct fc_exch *ep = NULL;
596
597 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
598 spin_lock_bh(&mp->em_lock);
599 ep = mp->exches[xid - mp->min_xid];
600 if (ep) {
601 fc_exch_hold(ep);
602 WARN_ON(ep->xid != xid);
603 }
604 spin_unlock_bh(&mp->em_lock);
605 }
606 return ep;
607}
608
609void fc_exch_done(struct fc_seq *sp)
610{
611 struct fc_exch *ep = fc_seq_exch(sp);
612 int rc;
613
614 spin_lock_bh(&ep->ex_lock);
615 rc = fc_exch_done_locked(ep);
616 spin_unlock_bh(&ep->ex_lock);
617 if (!rc)
618 fc_exch_mgr_delete_ep(ep);
619}
620EXPORT_SYMBOL(fc_exch_done);
621
622/*
623 * Allocate a new exchange as responder.
624 * Sets the responder ID in the frame header.
625 */
626static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
627{
628 struct fc_exch *ep;
629 struct fc_frame_header *fh;
630 u16 rxid;
631
632 ep = mp->lp->tt.exch_get(mp->lp, fp);
633 if (ep) {
634 ep->class = fc_frame_class(fp);
635
636 /*
637 * Set EX_CTX indicating we're responding on this exchange.
638 */
639 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
640 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
641 fh = fc_frame_header_get(fp);
642 ep->sid = ntoh24(fh->fh_d_id);
643 ep->did = ntoh24(fh->fh_s_id);
644 ep->oid = ep->did;
645
646 /*
647 * Allocated exchange has placed the XID in the
648 * originator field. Move it to the responder field,
649 * and set the originator XID from the frame.
650 */
651 ep->rxid = ep->xid;
652 ep->oxid = ntohs(fh->fh_ox_id);
653 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
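		/* don't hold sequence initiative unless the incoming frame transferred it to us */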
654 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
655 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
656
657 /*
658 * Set the responder ID in the frame header.
659 * The old one should've been 0xffff.
660 * If it isn't, don't assign one.
661 * Incoming basic link service frames may specify
662 * a referenced RX_ID.
663 */
664 if (fh->fh_type != FC_TYPE_BLS) {
665 rxid = ntohs(fh->fh_rx_id);
666 WARN_ON(rxid != FC_XID_UNKNOWN);
667 fh->fh_rx_id = htons(ep->rxid);
668 }
669 fc_exch_hold(ep); /* hold for caller */
670 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
671 }
672 return ep;
673}
674
675/*
676 * Find a sequence for receive where the other end is originating the sequence.
677 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
678 * on the ep that should be released by the caller.
679 */
680static enum fc_pf_rjt_reason
681fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
682{
683 struct fc_frame_header *fh = fc_frame_header_get(fp);
684 struct fc_exch *ep = NULL;
685 struct fc_seq *sp = NULL;
686 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
687 u32 f_ctl;
688 u16 xid;
689
690 f_ctl = ntoh24(fh->fh_f_ctl);
691 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
692
693 /*
694 * Lookup or create the exchange if we will be creating the sequence.
695 */
696 if (f_ctl & FC_FC_EX_CTX) {
697 xid = ntohs(fh->fh_ox_id); /* we originated exch */
698 ep = fc_exch_find(mp, xid);
699 if (!ep) {
700 atomic_inc(&mp->stats.xid_not_found);
701 reject = FC_RJT_OX_ID;
702 goto out;
703 }
704 if (ep->rxid == FC_XID_UNKNOWN)
705 ep->rxid = ntohs(fh->fh_rx_id);
706 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
707 reject = FC_RJT_OX_ID;
708 goto rel;
709 }
710 } else {
711 xid = ntohs(fh->fh_rx_id); /* we are the responder */
712
713 /*
714 * Special case for MDS issuing an ELS TEST with a
715 * bad rxid of 0.
716 * XXX take this out once we do the proper reject.
717 */
718 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
719 fc_frame_payload_op(fp) == ELS_TEST) {
720 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
721 xid = FC_XID_UNKNOWN;
722 }
723
724 /*
725 * new sequence - find the exchange
726 */
727 ep = fc_exch_find(mp, xid);
728 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
729 if (ep) {
730 atomic_inc(&mp->stats.xid_busy);
731 reject = FC_RJT_RX_ID;
732 goto rel;
733 }
734 ep = fc_exch_resp(mp, fp);
735 if (!ep) {
736 reject = FC_RJT_EXCH_EST; /* XXX */
737 goto out;
738 }
739 xid = ep->xid; /* get our XID */
740 } else if (!ep) {
741 atomic_inc(&mp->stats.xid_not_found);
742 reject = FC_RJT_RX_ID; /* XID not found */
743 goto out;
744 }
745 }
746
747 /*
748 * At this point, we have the exchange held.
749 * Find or create the sequence.
750 */
751 if (fc_sof_is_init(fr_sof(fp))) {
752 sp = fc_seq_start_next(&ep->seq);
753 if (!sp) {
754 reject = FC_RJT_SEQ_XS; /* exchange shortage */
755 goto rel;
756 }
757 sp->id = fh->fh_seq_id;
758 sp->ssb_stat |= SSB_ST_RESP;
759 } else {
760 sp = &ep->seq;
761 if (sp->id != fh->fh_seq_id) {
762 atomic_inc(&mp->stats.seq_not_found);
763 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
764 goto rel;
765 }
766 }
767 WARN_ON(ep != fc_seq_exch(sp));
768
769 if (f_ctl & FC_FC_SEQ_INIT)
770 ep->esb_stat |= ESB_ST_SEQ_INIT;
771
772 fr_seq(fp) = sp;
773out:
774 return reject;
775rel:
776 fc_exch_done(&ep->seq);
777 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
778 return reject;
779}
780
781/*
782 * Find the sequence for a frame being received.
783 * We originated the sequence, so it should be found.
784 * We may or may not have originated the exchange.
785 * Does not hold the sequence for the caller.
786 */
787static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
788 struct fc_frame *fp)
789{
790 struct fc_frame_header *fh = fc_frame_header_get(fp);
791 struct fc_exch *ep;
792 struct fc_seq *sp = NULL;
793 u32 f_ctl;
794 u16 xid;
795
796 f_ctl = ntoh24(fh->fh_f_ctl);
797 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
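	/*
	 * EX_CTX set means the sender is the exchange responder, so we
	 * originated the exchange and our xid is the OX_ID; otherwise
	 * we responded and our xid is the RX_ID.
	 */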
798 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
799 ep = fc_exch_find(mp, xid);
800 if (!ep)
801 return NULL;
802 if (ep->seq.id == fh->fh_seq_id) {
803 /*
804 * Save the RX_ID if we didn't previously know it.
805 */
806 sp = &ep->seq;
807 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
808 ep->rxid == FC_XID_UNKNOWN) {
809 ep->rxid = ntohs(fh->fh_rx_id);
810 }
811 }
812 fc_exch_release(ep);
813 return sp;
814}
815
816/*
817 * Set addresses for an exchange.
818 * Note this must be done before the first sequence of the exchange is sent.
819 */
820static void fc_exch_set_addr(struct fc_exch *ep,
821 u32 orig_id, u32 resp_id)
822{
823 ep->oid = orig_id;
824 if (ep->esb_stat & ESB_ST_RESP) {
825 ep->sid = resp_id;
826 ep->did = orig_id;
827 } else {
828 ep->sid = orig_id;
829 ep->did = resp_id;
830 }
831}
832
833static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
834{
835 struct fc_exch *ep = fc_seq_exch(sp);
836
837 sp = fc_seq_alloc(ep, ep->seq_id++);
838 FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
839 ep->xid, ep->f_ctl, sp->id);
840 return sp;
841}
842/*
843 * Allocate a new sequence on the same exchange as the supplied sequence.
844 * This will never return NULL.
845 */
846struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
847{
848 struct fc_exch *ep = fc_seq_exch(sp);
849
850 spin_lock_bh(&ep->ex_lock);
851 WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
852 sp = fc_seq_start_next_locked(sp);
853 spin_unlock_bh(&ep->ex_lock);
854
855 return sp;
856}
857EXPORT_SYMBOL(fc_seq_start_next);
858
859int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
860{
861 struct fc_exch *ep;
862 struct fc_frame_header *fh = fc_frame_header_get(fp);
863 int error;
864 u32 f_ctl;
865
866 ep = fc_seq_exch(sp);
867 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
868
869 f_ctl = ntoh24(fh->fh_f_ctl);
870 fc_exch_setup_hdr(ep, fp, f_ctl);
871
872 /*
873	 * Update the sequence count: with sequence offload enabled
874	 * by the LLD, one send may cover several FC frames, so count
875	 * ceil(payload / max_payload) frames rather than one.
876 */
877 if (fr_max_payload(fp))
878 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
879 fr_max_payload(fp));
880 else
881 sp->cnt++;
882
883 /*
884 * Send the frame.
885 */
886 error = lp->tt.frame_send(lp, fp);
887
888 /*
889 * Update the exchange and sequence flags,
890 * assuming all frames for the sequence have been sent.
891 * We can only be called to send once for each sequence.
892 */
893 spin_lock_bh(&ep->ex_lock);
894 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
895 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
896 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
897 spin_unlock_bh(&ep->ex_lock);
898 return error;
899}
900EXPORT_SYMBOL(fc_seq_send);
901
902void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
903 struct fc_seq_els_data *els_data)
904{
905 switch (els_cmd) {
906 case ELS_LS_RJT:
907 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
908 break;
909 case ELS_LS_ACC:
910 fc_seq_ls_acc(sp);
911 break;
912 case ELS_RRQ:
913 fc_exch_els_rrq(sp, els_data->fp);
914 break;
915 case ELS_REC:
916 fc_exch_els_rec(sp, els_data->fp);
917 break;
918 default:
919 FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
920 }
921}
922EXPORT_SYMBOL(fc_seq_els_rsp_send);
923
924/*
925 * Send a sequence, which is also the last sequence in the exchange.
926 */
927static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
928 enum fc_rctl rctl, enum fc_fh_type fh_type)
929{
930 u32 f_ctl;
931 struct fc_exch *ep = fc_seq_exch(sp);
932
933 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
934 f_ctl |= ep->f_ctl;
935 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
936 fc_seq_send(ep->lp, sp, fp);
937}
938
939/*
940 * Send ACK_1 (or equiv.) indicating we received something.
941 * The frame we're acking is supplied.
942 */
943static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
944{
945 struct fc_frame *fp;
946 struct fc_frame_header *rx_fh;
947 struct fc_frame_header *fh;
948 struct fc_exch *ep = fc_seq_exch(sp);
949 struct fc_lport *lp = ep->lp;
950 unsigned int f_ctl;
951
952 /*
953 * Don't send ACKs for class 3.
954 */
955 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
956 fp = fc_frame_alloc(lp, 0);
957 if (!fp)
958 return;
959
960 fh = fc_frame_header_get(fp);
961 fh->fh_r_ctl = FC_RCTL_ACK_1;
962 fh->fh_type = FC_TYPE_BLS;
963
964 /*
965 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
966 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
967 * Bits 9-8 are meaningful (retransmitted or unidirectional).
968 * Last ACK uses bits 7-6 (continue sequence),
969 * bits 5-4 are meaningful (what kind of ACK to use).
970 */
971 rx_fh = fc_frame_header_get(rx_fp);
972 f_ctl = ntoh24(rx_fh->fh_f_ctl);
973 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
974 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
975 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
976 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
977 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
978 hton24(fh->fh_f_ctl, f_ctl);
979
980 fc_exch_setup_hdr(ep, fp, f_ctl);
981 fh->fh_seq_id = rx_fh->fh_seq_id;
982 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
983 fh->fh_parm_offset = htonl(1); /* ack single frame */
984
985 fr_sof(fp) = fr_sof(rx_fp);
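		/* end the ACK's sequence with EOF_T only if the ACKed frame ended its sequence */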
986 if (f_ctl & FC_FC_END_SEQ)
987 fr_eof(fp) = FC_EOF_T;
988 else
989 fr_eof(fp) = FC_EOF_N;
990
991 (void) lp->tt.frame_send(lp, fp);
992 }
993}
994
995/*
996 * Send BLS Reject.
997 * This is for rejecting BA_ABTS only.
998 */
999static void
1000fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
1001 enum fc_ba_rjt_explan explan)
1002{
1003 struct fc_frame *fp;
1004 struct fc_frame_header *rx_fh;
1005 struct fc_frame_header *fh;
1006 struct fc_ba_rjt *rp;
1007 struct fc_lport *lp;
1008 unsigned int f_ctl;
1009
1010 lp = fr_dev(rx_fp);
1011 fp = fc_frame_alloc(lp, sizeof(*rp));
1012 if (!fp)
1013 return;
1014 fh = fc_frame_header_get(fp);
1015 rx_fh = fc_frame_header_get(rx_fp);
1016
1017 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1018
1019 rp = fc_frame_payload_get(fp, sizeof(*rp));
1020 rp->br_reason = reason;
1021 rp->br_explan = explan;
1022
1023 /*
1024 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1025 */
1026 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1027 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1028 fh->fh_ox_id = rx_fh->fh_rx_id;
1029 fh->fh_rx_id = rx_fh->fh_ox_id;
1030 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1031 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1032 fh->fh_type = FC_TYPE_BLS;
1033
1034 /*
1035 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1036 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1037 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1038 * Last ACK uses bits 7-6 (continue sequence),
1039 * bits 5-4 are meaningful (what kind of ACK to use).
1040 * Always set LAST_SEQ, END_SEQ.
1041 */
1042 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1043 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1044 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1045 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1046 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1047 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1048 f_ctl &= ~FC_FC_FIRST_SEQ;
1049 hton24(fh->fh_f_ctl, f_ctl);
1050
1051 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1052 fr_eof(fp) = FC_EOF_T;
1053 if (fc_sof_needs_ack(fr_sof(fp)))
1054 fr_eof(fp) = FC_EOF_N;
1055
1056 (void) lp->tt.frame_send(lp, fp);
1057}
1058
1059/*
1060 * Handle an incoming ABTS. This would be for target mode usually,
1061 * but could be due to lost FCP transfer ready, confirm or RRQ.
1062 * We always handle this as an exchange abort, ignoring the parameter.
1063 */
1064static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1065{
1066 struct fc_frame *fp;
1067 struct fc_ba_acc *ap;
1068 struct fc_frame_header *fh;
1069 struct fc_seq *sp;
1070
1071 if (!ep)
1072 goto reject;
1073 spin_lock_bh(&ep->ex_lock);
1074 if (ep->esb_stat & ESB_ST_COMPLETE) {
1075 spin_unlock_bh(&ep->ex_lock);
1076 goto reject;
1077 }
1078 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1079 fc_exch_hold(ep); /* hold for REC_QUAL */
1080 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1081 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1082
1083 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1084 if (!fp) {
1085 spin_unlock_bh(&ep->ex_lock);
1086 goto free;
1087 }
1088 fh = fc_frame_header_get(fp);
1089 ap = fc_frame_payload_get(fp, sizeof(*ap));
1090 memset(ap, 0, sizeof(*ap));
1091 sp = &ep->seq;
1092 ap->ba_high_seq_cnt = htons(0xffff);
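	/* as sequence recipient, report the aborted SEQ_ID and SEQ_CNT range in the BA_ACC */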
1093 if (sp->ssb_stat & SSB_ST_RESP) {
1094 ap->ba_seq_id = sp->id;
1095 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1096 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1097 ap->ba_low_seq_cnt = htons(sp->cnt);
1098 }
1099 sp = fc_seq_start_next(sp);
1100 spin_unlock_bh(&ep->ex_lock);
1101 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1102 fc_frame_free(rx_fp);
1103 return;
1104
1105reject:
1106 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1107free:
1108 fc_frame_free(rx_fp);
1109}
1110
1111/*
1112 * Handle receive where the other end is originating the sequence.
1113 */
1114static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1115 struct fc_frame *fp)
1116{
1117 struct fc_frame_header *fh = fc_frame_header_get(fp);
1118 struct fc_seq *sp = NULL;
1119 struct fc_exch *ep = NULL;
1120 enum fc_sof sof;
1121 enum fc_eof eof;
1122 u32 f_ctl;
1123 enum fc_pf_rjt_reason reject;
1124
1125 fr_seq(fp) = NULL;
1126 reject = fc_seq_lookup_recip(mp, fp);
1127 if (reject == FC_RJT_NONE) {
1128 sp = fr_seq(fp); /* sequence will be held */
1129 ep = fc_seq_exch(sp);
1130 sof = fr_sof(fp);
1131 eof = fr_eof(fp);
1132 f_ctl = ntoh24(fh->fh_f_ctl);
1133 fc_seq_send_ack(sp, fp);
1134
1135 /*
1136 * Call the receive function.
1137 *
1138 * The receive function may allocate a new sequence
1139 * over the old one, so we shouldn't change the
1140 * sequence after this.
1141 *
1142		 * The frame will be freed by the receive function.
1143		 * If the exchange's resp handler is set, call it first;
1144		 * otherwise hand the frame to the local port.
1145 */
1146 if (ep->resp)
1147 ep->resp(sp, fp, ep->arg);
1148 else
1149 lp->tt.lport_recv(lp, sp, fp);
1150 fc_exch_release(ep); /* release from lookup */
1151 } else {
1152 FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
1153 fc_frame_free(fp);
1154 }
1155}
1156
1157/*
1158 * Handle receive where the other end is originating the sequence in
1159 * response to our exchange.
1160 */
1161static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1162{
1163 struct fc_frame_header *fh = fc_frame_header_get(fp);
1164 struct fc_seq *sp;
1165 struct fc_exch *ep;
1166 enum fc_sof sof;
1167 u32 f_ctl;
1168 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1169 void *ex_resp_arg;
1170 int rc;
1171
1172 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1173 if (!ep) {
1174 atomic_inc(&mp->stats.xid_not_found);
1175 goto out;
1176 }
1177 if (ep->rxid == FC_XID_UNKNOWN)
1178 ep->rxid = ntohs(fh->fh_rx_id);
1179 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1180 atomic_inc(&mp->stats.xid_not_found);
1181 goto rel;
1182 }
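	/*
	 * Verify the responding source; exchanges addressed to the
	 * well-known FLOGI address may be answered from the assigned
	 * fabric address, so allow a mismatch in that case.
	 */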
1183 if (ep->did != ntoh24(fh->fh_s_id) &&
1184 ep->did != FC_FID_FLOGI) {
1185 atomic_inc(&mp->stats.xid_not_found);
1186 goto rel;
1187 }
1188 sof = fr_sof(fp);
1189 if (fc_sof_is_init(sof)) {
1190 sp = fc_seq_start_next(&ep->seq);
1191 sp->id = fh->fh_seq_id;
1192 sp->ssb_stat |= SSB_ST_RESP;
1193 } else {
1194 sp = &ep->seq;
1195 if (sp->id != fh->fh_seq_id) {
1196 atomic_inc(&mp->stats.seq_not_found);
1197 goto rel;
1198 }
1199 }
1200 f_ctl = ntoh24(fh->fh_f_ctl);
1201 fr_seq(fp) = sp;
1202 if (f_ctl & FC_FC_SEQ_INIT)
1203 ep->esb_stat |= ESB_ST_SEQ_INIT;
1204
1205 if (fc_sof_needs_ack(sof))
1206 fc_seq_send_ack(sp, fp);
1207 resp = ep->resp;
1208 ex_resp_arg = ep->arg;
1209
1210 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1211 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1212 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1213 spin_lock_bh(&ep->ex_lock);
1214 rc = fc_exch_done_locked(ep);
1215 WARN_ON(fc_seq_exch(sp) != ep);
1216 spin_unlock_bh(&ep->ex_lock);
1217 if (!rc)
1218 fc_exch_mgr_delete_ep(ep);
1219 }
1220
1221 /*
1222 * Call the receive function.
1223 * The sequence is held (has a refcnt) for us,
1224 * but not for the receive function.
1225 *
1226 * The receive function may allocate a new sequence
1227 * over the old one, so we shouldn't change the
1228 * sequence after this.
1229 *
1230 * The frame will be freed by the receive function.
1231	 * If the exchange's resp handler is set, call it;
1232	 * otherwise free the frame here.
1233 */
1234 if (resp)
1235 resp(sp, fp, ex_resp_arg);
1236 else
1237 fc_frame_free(fp);
1238 fc_exch_release(ep);
1239 return;
1240rel:
1241 fc_exch_release(ep);
1242out:
1243 fc_frame_free(fp);
1244}
1245
1246/*
1247 * Handle receive for a sequence where other end is responding to our sequence.
1248 */
1249static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1250{
1251 struct fc_seq *sp;
1252
1253 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1254 if (!sp) {
1255 atomic_inc(&mp->stats.xid_not_found);
1256 FC_DEBUG_EXCH("seq lookup failed\n");
1257 } else {
1258 atomic_inc(&mp->stats.non_bls_resp);
1259		FC_DEBUG_EXCH("non-BLS response to sequence\n");
1260 }
1261 fc_frame_free(fp);
1262}
1263
1264/*
1265 * Handle the response to an ABTS for exchange or sequence.
1266 * This can be BA_ACC or BA_RJT.
1267 */
1268static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1269{
1270 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1271 void *ex_resp_arg;
1272 struct fc_frame_header *fh;
1273 struct fc_ba_acc *ap;
1274 struct fc_seq *sp;
1275 u16 low;
1276 u16 high;
1277 int rc = 1, has_rec = 0;
1278
1279 fh = fc_frame_header_get(fp);
1280 FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
1281 fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
1282
1283 if (cancel_delayed_work_sync(&ep->timeout_work))
1284 fc_exch_release(ep); /* release from pending timer hold */
1285
1286 spin_lock_bh(&ep->ex_lock);
1287 switch (fh->fh_r_ctl) {
1288 case FC_RCTL_BA_ACC:
1289 ap = fc_frame_payload_get(fp, sizeof(*ap));
1290 if (!ap)
1291 break;
1292
1293 /*
1294 * Decide whether to establish a Recovery Qualifier.
1295 * We do this if there is a non-empty SEQ_CNT range and
1296 * SEQ_ID is the same as the one we aborted.
1297 */
1298 low = ntohs(ap->ba_low_seq_cnt);
1299 high = ntohs(ap->ba_high_seq_cnt);
1300 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1301 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1302 ap->ba_seq_id == ep->seq_id) && low != high) {
1303 ep->esb_stat |= ESB_ST_REC_QUAL;
1304 fc_exch_hold(ep); /* hold for recovery qualifier */
1305 has_rec = 1;
1306 }
1307 break;
1308 case FC_RCTL_BA_RJT:
1309 break;
1310 default:
1311 break;
1312 }
1313
1314 resp = ep->resp;
1315 ex_resp_arg = ep->arg;
1316
1317	/* XXX: do we need other checks here? Can we reuse more of
1318	 * fc_exch_recv_seq_resp()?
1319	 */
1320 sp = &ep->seq;
1321 /*
1322 * do we want to check END_SEQ as well as LAST_SEQ here?
1323 */
1324 if (ep->fh_type != FC_TYPE_FCP &&
1325 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1326 rc = fc_exch_done_locked(ep);
1327 spin_unlock_bh(&ep->ex_lock);
1328 if (!rc)
1329 fc_exch_mgr_delete_ep(ep);
1330
1331 if (resp)
1332 resp(sp, fp, ex_resp_arg);
1333 else
1334 fc_frame_free(fp);
1335
1336 if (has_rec)
1337 fc_exch_timer_set(ep, ep->r_a_tov);
1338
1339}
1340
1341/*
1342 * Receive BLS sequence.
1343 * This is always a sequence initiated by the remote side.
1344 * We may be either the originator or recipient of the exchange.
1345 */
1346static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1347{
1348 struct fc_frame_header *fh;
1349 struct fc_exch *ep;
1350 u32 f_ctl;
1351
1352 fh = fc_frame_header_get(fp);
1353 f_ctl = ntoh24(fh->fh_f_ctl);
1354 fr_seq(fp) = NULL;
1355
1356 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1357 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1358 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1359 spin_lock_bh(&ep->ex_lock);
1360 ep->esb_stat |= ESB_ST_SEQ_INIT;
1361 spin_unlock_bh(&ep->ex_lock);
1362 }
1363 if (f_ctl & FC_FC_SEQ_CTX) {
1364 /*
1365 * A response to a sequence we initiated.
1366 * This should only be ACKs for class 2 or F.
1367 */
1368 switch (fh->fh_r_ctl) {
1369 case FC_RCTL_ACK_1:
1370 case FC_RCTL_ACK_0:
1371 break;
1372 default:
1373 FC_DEBUG_EXCH("BLS rctl %x - %s received",
1374 fh->fh_r_ctl,
1375 fc_exch_rctl_name(fh->fh_r_ctl));
1376 break;
1377 }
1378 fc_frame_free(fp);
1379 } else {
1380 switch (fh->fh_r_ctl) {
1381 case FC_RCTL_BA_RJT:
1382 case FC_RCTL_BA_ACC:
1383 if (ep)
1384 fc_exch_abts_resp(ep, fp);
1385 else
1386 fc_frame_free(fp);
1387 break;
1388 case FC_RCTL_BA_ABTS:
1389 fc_exch_recv_abts(ep, fp);
1390 break;
1391 default: /* ignore junk */
1392 fc_frame_free(fp);
1393 break;
1394 }
1395 }
1396 if (ep)
1397 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1398}
1399
1400/*
1401 * Accept sequence with LS_ACC.
1402 * If this fails due to allocation or transmit congestion, assume the
1403 * originator will repeat the sequence.
1404 */
1405static void fc_seq_ls_acc(struct fc_seq *req_sp)
1406{
1407 struct fc_seq *sp;
1408 struct fc_els_ls_acc *acc;
1409 struct fc_frame *fp;
1410
1411 sp = fc_seq_start_next(req_sp);
1412 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1413 if (fp) {
1414 acc = fc_frame_payload_get(fp, sizeof(*acc));
1415 memset(acc, 0, sizeof(*acc));
1416 acc->la_cmd = ELS_LS_ACC;
1417 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1418 }
1419}
1420
1421/*
1422 * Reject sequence with ELS LS_RJT.
1423 * If this fails due to allocation or transmit congestion, assume the
1424 * originator will repeat the sequence.
1425 */
1426static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1427 enum fc_els_rjt_explan explan)
1428{
1429 struct fc_seq *sp;
1430 struct fc_els_ls_rjt *rjt;
1431 struct fc_frame *fp;
1432
1433 sp = fc_seq_start_next(req_sp);
1434 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1435 if (fp) {
1436 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1437 memset(rjt, 0, sizeof(*rjt));
1438 rjt->er_cmd = ELS_LS_RJT;
1439 rjt->er_reason = reason;
1440 rjt->er_explan = explan;
1441 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1442 }
1443}
1444
1445static void fc_exch_reset(struct fc_exch *ep)
1446{
1447 struct fc_seq *sp;
1448 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1449 void *arg;
1450 int rc = 1;
1451
1452 spin_lock_bh(&ep->ex_lock);
1453 ep->state |= FC_EX_RST_CLEANUP;
1454 /*
1455	 * We really want to call del_timer_sync(), but cannot: the
1456	 * lport may call us with the lport lock held, and some resp
1457	 * functions also grab the lport lock, which could cause
1458	 * a deadlock.
1459 */
1460 if (cancel_delayed_work(&ep->timeout_work))
1461 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1462 resp = ep->resp;
1463 ep->resp = NULL;
1464 if (ep->esb_stat & ESB_ST_REC_QUAL)
1465 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1466 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1467 arg = ep->arg;
1468 sp = &ep->seq;
1469 rc = fc_exch_done_locked(ep);
1470 spin_unlock_bh(&ep->ex_lock);
1471 if (!rc)
1472 fc_exch_mgr_delete_ep(ep);
1473
1474 if (resp)
1475 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1476}
1477
1478/*
1479 * Reset an exchange manager, releasing all sequences and exchanges.
1480 * If sid is non-zero, reset only exchanges we source from that FID.
1481 * If did is non-zero, reset only exchanges destined to that FID.
1482 */
1483void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
1484{
1485 struct fc_exch *ep;
1486 struct fc_exch *next;
1487
1488 spin_lock_bh(&mp->em_lock);
1489restart:
1490 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1491 if ((sid == 0 || sid == ep->sid) &&
1492 (did == 0 || did == ep->did)) {
1493 fc_exch_hold(ep);
1494 spin_unlock_bh(&mp->em_lock);
1495
1496 fc_exch_reset(ep);
1497
1498 fc_exch_release(ep);
1499 spin_lock_bh(&mp->em_lock);
1500
1501 /*
1502			 * must restart the loop in case multiple eps
1503			 * were released while the lock was dropped.
1504 */
1505 goto restart;
1506 }
1507 }
1508 spin_unlock_bh(&mp->em_lock);
1509}
1510EXPORT_SYMBOL(fc_exch_mgr_reset);
1511
1512/*
1513 * Handle incoming ELS REC - Read Exchange Concise.
1514 * Note that the requesting port may be different than the S_ID in the request.
1515 */
1516static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1517{
1518 struct fc_frame *fp;
1519 struct fc_exch *ep;
1520 struct fc_exch_mgr *em;
1521 struct fc_els_rec *rp;
1522 struct fc_els_rec_acc *acc;
1523 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1524 enum fc_els_rjt_explan explan;
1525 u32 sid;
1526 u16 rxid;
1527 u16 oxid;
1528
1529 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1530 explan = ELS_EXPL_INV_LEN;
1531 if (!rp)
1532 goto reject;
1533 sid = ntoh24(rp->rec_s_id);
1534 rxid = ntohs(rp->rec_rx_id);
1535 oxid = ntohs(rp->rec_ox_id);
1536
1537 /*
1538 * Currently it's hard to find the local S_ID from the exchange
1539 * manager. This will eventually be fixed, but for now it's easier
1540	 * to look up the subject exchange twice, once as if we were
1541	 * the initiator, and then again as if we weren't.
1542 */
1543 em = fc_seq_exch(sp)->em;
1544 ep = fc_exch_find(em, oxid);
1545 explan = ELS_EXPL_OXID_RXID;
1546 if (ep && ep->oid == sid) {
1547 if (ep->rxid != FC_XID_UNKNOWN &&
1548 rxid != FC_XID_UNKNOWN &&
1549 ep->rxid != rxid)
1550 goto rel;
1551 } else {
1552 if (ep)
1553 fc_exch_release(ep);
1554 ep = NULL;
1555 if (rxid != FC_XID_UNKNOWN)
1556 ep = fc_exch_find(em, rxid);
1557 if (!ep)
1558 goto reject;
1559 }
1560
1561 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1562 if (!fp) {
1563 fc_exch_done(sp);
1564 goto out;
1565 }
1566 sp = fc_seq_start_next(sp);
1567 acc = fc_frame_payload_get(fp, sizeof(*acc));
1568 memset(acc, 0, sizeof(*acc));
1569 acc->reca_cmd = ELS_LS_ACC;
1570 acc->reca_ox_id = rp->rec_ox_id;
1571 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1572 acc->reca_rx_id = htons(ep->rxid);
1573 if (ep->sid == ep->oid)
1574 hton24(acc->reca_rfid, ep->did);
1575 else
1576 hton24(acc->reca_rfid, ep->sid);
1577 acc->reca_fc4value = htonl(ep->seq.rec_data);
1578 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1579 ESB_ST_SEQ_INIT |
1580 ESB_ST_COMPLETE));
1581 sp = fc_seq_start_next(sp);
1582 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1583out:
1584 fc_exch_release(ep);
1585 fc_frame_free(rfp);
1586 return;
1587
1588rel:
1589 fc_exch_release(ep);
1590reject:
1591 fc_seq_ls_rjt(sp, reason, explan);
1592 fc_frame_free(rfp);
1593}
1594
1595/*
1596 * Handle response from RRQ.
1597 * Not much to do here, really.
1598 * Should report errors.
1599 *
1600 * TODO: fix error handler.
1601 */
1602static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1603{
1604 struct fc_exch *aborted_ep = arg;
1605 unsigned int op;
1606
1607 if (IS_ERR(fp)) {
1608 int err = PTR_ERR(fp);
1609
1610 if (err == -FC_EX_CLOSED)
1611 goto cleanup;
1612 FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
1613 return;
1614 }
1615
1616 op = fc_frame_payload_op(fp);
1617 fc_frame_free(fp);
1618
1619 switch (op) {
1620 case ELS_LS_RJT:
1621 FC_DBG("LS_RJT for RRQ");
1622 /* fall through */
1623 case ELS_LS_ACC:
1624 goto cleanup;
1625 default:
1626 FC_DBG("unexpected response op %x for RRQ", op);
1627 return;
1628 }
1629
1630cleanup:
1631 fc_exch_done(&aborted_ep->seq);
1632 /* drop hold for rec qual */
1633 fc_exch_release(aborted_ep);
1634}
1635
1636/*
1637 * Send ELS RRQ - Reinstate Recovery Qualifier.
1638 * This tells the remote port to stop blocking the use of
1639 * the exchange and the seq_cnt range.
1640 */
1641static void fc_exch_rrq(struct fc_exch *ep)
1642{
1643 struct fc_lport *lp;
1644 struct fc_els_rrq *rrq;
1645 struct fc_frame *fp;
1646 struct fc_seq *rrq_sp;
1647 u32 did;
1648
1649 lp = ep->lp;
1650
1651 fp = fc_frame_alloc(lp, sizeof(*rrq));
1652 if (!fp)
1653 return;
1654 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1655 memset(rrq, 0, sizeof(*rrq));
1656 rrq->rrq_cmd = ELS_RRQ;
1657 hton24(rrq->rrq_s_id, ep->sid);
1658 rrq->rrq_ox_id = htons(ep->oxid);
1659 rrq->rrq_rx_id = htons(ep->rxid);
1660
1661 did = ep->did;
1662 if (ep->esb_stat & ESB_ST_RESP)
1663 did = ep->sid;
1664
1665 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1666 fc_host_port_id(lp->host), FC_TYPE_ELS,
1667 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1668
1669 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
1670 lp->e_d_tov);
1671 if (!rrq_sp) {
1672 ep->esb_stat |= ESB_ST_REC_QUAL;
1673 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1674 return;
1675 }
1676}
1677
1678
1679/*
1680 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1681 */
1682static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1683{
1684 struct fc_exch *ep; /* request or subject exchange */
1685 struct fc_els_rrq *rp;
1686 u32 sid;
1687 u16 xid;
1688 enum fc_els_rjt_explan explan;
1689
1690 rp = fc_frame_payload_get(fp, sizeof(*rp));
1691 explan = ELS_EXPL_INV_LEN;
1692 if (!rp)
1693 goto reject;
1694
1695 /*
1696 * lookup subject exchange.
1697 */
1698 ep = fc_seq_exch(sp);
1699 sid = ntoh24(rp->rrq_s_id); /* subject source */
1700 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1701 ep = fc_exch_find(ep->em, xid);
1702
1703 explan = ELS_EXPL_OXID_RXID;
1704 if (!ep)
1705 goto reject;
1706 spin_lock_bh(&ep->ex_lock);
1707 if (ep->oxid != ntohs(rp->rrq_ox_id))
1708 goto unlock_reject;
1709 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1710 ep->rxid != FC_XID_UNKNOWN)
1711 goto unlock_reject;
1712 explan = ELS_EXPL_SID;
1713 if (ep->sid != sid)
1714 goto unlock_reject;
1715
1716 /*
1717 * Clear Recovery Qualifier state, and cancel timer if complete.
1718 */
1719 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1720 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1721 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1722 }
1723 if (ep->esb_stat & ESB_ST_COMPLETE) {
1724 if (cancel_delayed_work(&ep->timeout_work))
1725 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1726 }
1727
1728 spin_unlock_bh(&ep->ex_lock);
1729
1730 /*
1731 * Send LS_ACC.
1732 */
1733 fc_seq_ls_acc(sp);
1734 fc_frame_free(fp);
1735 return;
1736
1737unlock_reject:
1738 spin_unlock_bh(&ep->ex_lock);
1739 fc_exch_release(ep); /* drop hold from fc_exch_find */
1740reject:
1741 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1742 fc_frame_free(fp);
1743}
1744
1745struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1746 enum fc_class class,
1747 u16 min_xid, u16 max_xid)
1748{
1749 struct fc_exch_mgr *mp;
1750 size_t len;
1751
1752 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
1753		FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
1754 min_xid, max_xid);
1755 return NULL;
1756 }
1757
1758 /*
1759 * Memory need for EM
1760 */
1761#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1762 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1763 len += sizeof(struct fc_exch_mgr);
1764
1765 mp = kzalloc(len, GFP_ATOMIC);
1766 if (!mp)
1767 return NULL;
1768
1769 mp->class = class;
1770 mp->total_exches = 0;
1771 mp->exches = (struct fc_exch **)(mp + 1);
1772 mp->lp = lp;
1773 /* adjust em exch xid range for offload */
1774 mp->min_xid = min_xid;
1775 mp->max_xid = max_xid;
1776 mp->last_xid = min_xid - 1;
1777 mp->max_read = 0;
1778 mp->last_read = 0;
1779 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1780 mp->max_read = lp->lro_xid;
1781 mp->last_read = min_xid - 1;
1782 mp->last_xid = mp->max_read;
1783 } else {
1784 /* disable lro if no xid control over read */
1785 lp->lro_enabled = 0;
1786 }
1787
1788 INIT_LIST_HEAD(&mp->ex_list);
1789 spin_lock_init(&mp->em_lock);
1790
1791 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1792 if (!mp->ep_pool)
1793 goto free_mp;
1794
1795 return mp;
1796
1797free_mp:
1798 kfree(mp);
1799 return NULL;
1800}
1801EXPORT_SYMBOL(fc_exch_mgr_alloc);
1802
1803void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1804{
1805 WARN_ON(!mp);
1806 /*
1807 * The total exch count must be zero
1808 * before freeing exchange manager.
1809 */
1810 WARN_ON(mp->total_exches != 0);
1811 mempool_destroy(mp->ep_pool);
1812 kfree(mp);
1813}
1814EXPORT_SYMBOL(fc_exch_mgr_free);
1815
1816struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1817{
1818 if (!lp || !lp->emp)
1819 return NULL;
1820
1821 return fc_exch_alloc(lp->emp, fp, 0);
1822}
1823EXPORT_SYMBOL(fc_exch_get);
1824
1825struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1826 struct fc_frame *fp,
1827 void (*resp)(struct fc_seq *,
1828 struct fc_frame *fp,
1829 void *arg),
1830 void (*destructor)(struct fc_seq *, void *),
1831 void *arg, u32 timer_msec)
1832{
1833 struct fc_exch *ep;
1834 struct fc_seq *sp = NULL;
1835 struct fc_frame_header *fh;
1836 int rc = 1;
1837
1838 ep = lp->tt.exch_get(lp, fp);
1839 if (!ep) {
1840 fc_frame_free(fp);
1841 return NULL;
1842 }
1843 ep->esb_stat |= ESB_ST_SEQ_INIT;
1844 fh = fc_frame_header_get(fp);
1845 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1846 ep->resp = resp;
1847 ep->destructor = destructor;
1848 ep->arg = arg;
1849 ep->r_a_tov = FC_DEF_R_A_TOV;
1850 ep->lp = lp;
1851 sp = &ep->seq;
1852
1853	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1854 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1855 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1856 sp->cnt++;
1857
1858 if (unlikely(lp->tt.frame_send(lp, fp)))
1859 goto err;
1860
1861 if (timer_msec)
1862 fc_exch_timer_set_locked(ep, timer_msec);
1863 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1864
1865 if (ep->f_ctl & FC_FC_SEQ_INIT)
1866 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
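	/* drop the exch lock held since exch_get (see fc_exch_alloc) */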
1867 spin_unlock_bh(&ep->ex_lock);
1868 return sp;
1869err:
1870 rc = fc_exch_done_locked(ep);
1871 spin_unlock_bh(&ep->ex_lock);
1872 if (!rc)
1873 fc_exch_mgr_delete_ep(ep);
1874 return NULL;
1875}
1876EXPORT_SYMBOL(fc_exch_seq_send);
1877
1878/*
1879 * Receive a frame
1880 */
1881void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1882 struct fc_frame *fp)
1883{
1884 struct fc_frame_header *fh = fc_frame_header_get(fp);
1885 u32 f_ctl;
1886
1887 /* lport lock ? */
1888 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
1889 FC_DBG("fc_lport or EM is not allocated and configured");
1890 fc_frame_free(fp);
1891 return;
1892 }
1893
1894 /*
1895	 * Dispatch on the frame's EOF; frames marked invalid are dropped.
1896 */
1897 f_ctl = ntoh24(fh->fh_f_ctl);
1898 switch (fr_eof(fp)) {
1899 case FC_EOF_T:
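		/* strip the pad (fill) bytes from the end of the sequence's last frame */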
1900 if (f_ctl & FC_FC_END_SEQ)
1901 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1902 /* fall through */
1903 case FC_EOF_N:
1904 if (fh->fh_type == FC_TYPE_BLS)
1905 fc_exch_recv_bls(mp, fp);
1906 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1907 FC_FC_EX_CTX)
1908 fc_exch_recv_seq_resp(mp, fp);
1909 else if (f_ctl & FC_FC_SEQ_CTX)
1910 fc_exch_recv_resp(mp, fp);
1911 else
1912 fc_exch_recv_req(lp, mp, fp);
1913 break;
1914 default:
1915		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
1916 fc_frame_free(fp);
1917 break;
1918 }
1919}
1920EXPORT_SYMBOL(fc_exch_recv);
1921
1922int fc_exch_init(struct fc_lport *lp)
1923{
1924 if (!lp->tt.exch_get) {
1925 /*
1926 * exch_put() should be NULL if
1927 * exch_get() is NULL
1928 */
1929 WARN_ON(lp->tt.exch_put);
1930 lp->tt.exch_get = fc_exch_get;
1931 }
1932
1933 if (!lp->tt.seq_start_next)
1934 lp->tt.seq_start_next = fc_seq_start_next;
1935
1936 if (!lp->tt.exch_seq_send)
1937 lp->tt.exch_seq_send = fc_exch_seq_send;
1938
1939 if (!lp->tt.seq_send)
1940 lp->tt.seq_send = fc_seq_send;
1941
1942 if (!lp->tt.seq_els_rsp_send)
1943 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1944
1945 if (!lp->tt.exch_done)
1946 lp->tt.exch_done = fc_exch_done;
1947
1948 if (!lp->tt.exch_mgr_reset)
1949 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1950
1951 if (!lp->tt.seq_exch_abort)
1952 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1953
1954 return 0;
1955}
1956EXPORT_SYMBOL(fc_exch_init);
1957
1958int fc_setup_exch_mgr(void)
1959{
1960 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1961 0, SLAB_HWCACHE_ALIGN, NULL);
1962 if (!fc_em_cachep)
1963 return -ENOMEM;
1964 return 0;
1965}
1966
1967void fc_destroy_exch_mgr(void)
1968{
1969 kmem_cache_destroy(fc_em_cachep);
1970}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000000..404e63ff46b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/spinlock.h>
27#include <linux/scatterlist.h>
28#include <linux/err.h>
29#include <linux/crc32.h>
30
31#include <scsi/scsi_tcq.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36
37#include <scsi/fc/fc_fc2.h>
38
39#include <scsi/libfc.h>
40#include <scsi/fc_encode.h>
41
42MODULE_AUTHOR("Open-FCoE.org");
43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL");
45
46static int fc_fcp_debug;
47
48#define FC_DEBUG_FCP(fmt...) \
49 do { \
50 if (fc_fcp_debug) \
51 FC_DBG(fmt); \
52 } while (0)
53
54static struct kmem_cache *scsi_pkt_cachep;
55
56/* SRB state definitions */
57#define FC_SRB_FREE 0 /* cmd is free */
58#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
59#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
60#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
61#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
62#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
63#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
64#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
65#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */
66
67#define FC_SRB_READ (1 << 1)
68#define FC_SRB_WRITE (1 << 0)
69
70/*
71 * The SCp.ptr should be tested and set under the host lock. NULL indicates
72 * that the command has been returned to the scsi layer.
73 */
74#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
75#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
76#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
77#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
78#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
79
80struct fc_fcp_internal {
81 mempool_t *scsi_pkt_pool;
82 struct list_head scsi_pkt_queue;
83 u8 throttled;
84};
85
86#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
87
88/*
89 * function prototypes
90 * FC scsi I/O related functions
91 */
92static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
93static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
94static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
95static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
96static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
97static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
98static void fc_timeout_error(struct fc_fcp_pkt *);
99static void fc_fcp_timeout(unsigned long data);
100static void fc_fcp_rec(struct fc_fcp_pkt *);
101static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
102static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
103static void fc_io_compl(struct fc_fcp_pkt *);
104
105static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
106static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
107static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
108
109/*
110 * command status codes
111 */
112#define FC_COMPLETE 0
113#define FC_CMD_ABORTED 1
114#define FC_CMD_RESET 2
115#define FC_CMD_PLOGO 3
116#define FC_SNS_RCV 4
117#define FC_TRANS_ERR 5
118#define FC_DATA_OVRRUN 6
119#define FC_DATA_UNDRUN 7
120#define FC_ERROR 8
121#define FC_HRD_ERROR 9
122#define FC_CMD_TIME_OUT 10
123
124/*
125 * Error recovery timeout values.
126 */
127#define FC_SCSI_ER_TIMEOUT (10 * HZ)
128#define FC_SCSI_TM_TOV (10 * HZ)
129#define FC_SCSI_REC_TOV (2 * HZ)
130#define FC_HOST_RESET_TIMEOUT (30 * HZ)
131
132#define FC_MAX_ERROR_CNT 5
133#define FC_MAX_RECOV_RETRY 3
134
135#define FC_FCP_DFLT_QUEUE_DEPTH 32
136
137/**
138 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
139 * @lp: fc lport struct
140 * @gfp: gfp flags for allocation
141 *
142 * This is used by the upper-layer SCSI driver.
143 * Return Value : scsi_pkt structure, or NULL on allocation failure.
144 * Context : called from process context. No locking required.
145 */
146static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
147{
148 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
149 struct fc_fcp_pkt *fsp;
150
151 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
152 if (fsp) {
153 memset(fsp, 0, sizeof(*fsp));
154 fsp->lp = lp;
155 atomic_set(&fsp->ref_cnt, 1);
156 init_timer(&fsp->timer);
157 INIT_LIST_HEAD(&fsp->list);
158 spin_lock_init(&fsp->scsi_pkt_lock);
159 }
160 return fsp;
161}
162
163/**
164 * fc_fcp_pkt_release - release hold on scsi_pkt packet
165 * @fsp: fcp packet struct
166 *
167 * This is used by the upper-layer SCSI driver.
168 * Context : called from process and interrupt context.
169 *	     no locking required
170 */
171static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
172{
173 if (atomic_dec_and_test(&fsp->ref_cnt)) {
174 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
175
176 mempool_free(fsp, si->scsi_pkt_pool);
177 }
178}
179
180static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
181{
182 atomic_inc(&fsp->ref_cnt);
183}
184
185/**
186 * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
187 *
188 * @seq: exchange sequence
189 * @fsp: fcp packet struct
190 *
191 * Release the hold that kept the scsi_pkt alive until the
192 * EM layer exchange resource was freed.
193 * Context : called from the EM layer.
194 * no locking required
195 */
196static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
197{
198 fc_fcp_pkt_release(fsp);
199}
200
201/**
202 * fc_fcp_lock_pkt - lock a packet and get a ref to it.
203 * @fsp: fcp packet
204 *
205 * We should only return an error if we return a command to scsi-ml before
206 * getting a response. This can happen when we send an abort but do not
207 * wait for the response, so the abort and the command may pass each
208 * other on the wire/network layer.
209 *
210 * Note: this function locks the packet and gets a reference to allow
211 * callers to call the completion function while the lock is held and
212 * not have to worry about the packets refcount.
213 *
214 * TODO: Maybe we should just have callers grab/release the lock and
215 * have a function that they call to verify the fsp and grab a ref if
216 * needed.
217 */
218static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
219{
220 spin_lock_bh(&fsp->scsi_pkt_lock);
221 if (fsp->state & FC_SRB_COMPL) {
222 spin_unlock_bh(&fsp->scsi_pkt_lock);
223 return -EPERM;
224 }
225
226 fc_fcp_pkt_hold(fsp);
227 return 0;
228}
229
230static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
231{
232 spin_unlock_bh(&fsp->scsi_pkt_lock);
233 fc_fcp_pkt_release(fsp);
234}
235
236static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
237{
238 if (!(fsp->state & FC_SRB_COMPL))
239 mod_timer(&fsp->timer, jiffies + delay);
240}
241
242static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
243{
244 if (!fsp->seq_ptr)
245 return -EINVAL;
246
247 fsp->state |= FC_SRB_ABORT_PENDING;
248 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
249}
250
251/*
252 * Retry command.
253 * An abort isn't needed.
254 */
255static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
256{
257 if (fsp->seq_ptr) {
258 fsp->lp->tt.exch_done(fsp->seq_ptr);
259 fsp->seq_ptr = NULL;
260 }
261
262 fsp->state &= ~FC_SRB_ABORT_PENDING;
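	/* SUGGEST_RETRY in the driver byte asks scsi-ml to retry the command */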
263 fsp->io_status = SUGGEST_RETRY << 24;
264 fsp->status_code = FC_ERROR;
265 fc_fcp_complete_locked(fsp);
266}
267
268/*
269 * Receive SCSI data from target.
270 * Called after receiving solicited data.
271 */
272static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
273{
274 struct scsi_cmnd *sc = fsp->cmd;
275 struct fc_lport *lp = fsp->lp;
276 struct fcoe_dev_stats *stats;
277 struct fc_frame_header *fh;
278 size_t start_offset;
279 size_t offset;
280 u32 crc;
281 u32 copy_len = 0;
282 size_t len;
283 void *buf;
284 struct scatterlist *sg;
285 size_t remaining;
286
287 fh = fc_frame_header_get(fp);
288 offset = ntohl(fh->fh_parm_offset);
289 start_offset = offset;
290 len = fr_len(fp) - sizeof(*fh);
291 buf = fc_frame_payload_get(fp, 0);
292
293 if (offset + len > fsp->data_len) {
294 /*
295 * this should never happen
296 */
297 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
298 fc_frame_crc_check(fp))
299 goto crc_err;
300 FC_DEBUG_FCP("data received past end. len %zx offset %zx "
301 "data_len %x\n", len, offset, fsp->data_len);
302 fc_fcp_retry_cmd(fsp);
303 return;
304 }
305 if (offset != fsp->xfer_len)
306 fsp->state |= FC_SRB_DISCONTIG;
307
308 crc = 0;
309 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
310 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
311
312 sg = scsi_sglist(sc);
313 remaining = len;
314
315 while (remaining > 0 && sg) {
316 size_t off;
317 void *page_addr;
318 size_t sg_bytes;
319
320 if (offset >= sg->length) {
321 offset -= sg->length;
322 sg = sg_next(sg);
323 continue;
324 }
325 sg_bytes = min(remaining, sg->length - offset);
326
327 /*
328 * The scatterlist item may be bigger than PAGE_SIZE,
329 * but we are limited to mapping PAGE_SIZE at a time.
330 */
331 off = offset + sg->offset;
332 sg_bytes = min(sg_bytes, (size_t)
333 (PAGE_SIZE - (off & ~PAGE_MASK)));
334 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
335 KM_SOFTIRQ0);
336 if (!page_addr)
337 break; /* XXX panic? */
338
339 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
340 crc = crc32(crc, buf, sg_bytes);
341 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
342 sg_bytes);
343
344 kunmap_atomic(page_addr, KM_SOFTIRQ0);
345 buf += sg_bytes;
346 offset += sg_bytes;
347 remaining -= sg_bytes;
348 copy_len += sg_bytes;
349 }
350
351 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
352 buf = fc_frame_payload_get(fp, 0);
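		/* the payload is padded to a 4-byte boundary; include the fill bytes in the CRC */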
353 if (len % 4) {
354 crc = crc32(crc, buf + len, 4 - (len % 4));
355 len += 4 - (len % 4);
356 }
357
358 if (~crc != le32_to_cpu(fr_crc(fp))) {
359crc_err:
360 stats = lp->dev_stats[smp_processor_id()];
361 stats->ErrorFrames++;
362 if (stats->InvalidCRCCount++ < 5)
363 FC_DBG("CRC error on data frame\n");
364 /*
365 * Assume the frame is total garbage.
366 * We may have copied it over the good part
367 * of the buffer.
368 * If so, we need to retry the entire operation.
369 * Otherwise, ignore it.
370 */
371 if (fsp->state & FC_SRB_DISCONTIG)
372 fc_fcp_retry_cmd(fsp);
373 return;
374 }
375 }
376
377 if (fsp->xfer_contig_end == start_offset)
378 fsp->xfer_contig_end += copy_len;
379 fsp->xfer_len += copy_len;
380
381 /*
382 * In the very rare event that this data arrived after the response
383 * and completes the transfer, call the completion handler.
384 */
385 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
386 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
387 fc_fcp_complete_locked(fsp);
388}
389
390/*
391 * fc_fcp_send_data - Send SCSI data to target.
392 * @fsp: ptr to fc_fcp_pkt
393 * @seq: ptr to this sequence
394 * @offset: starting offset for this data request
395 * @seq_blen: the burst length for this data request
396 *
397 * Called after receiving a Transfer Ready data descriptor.
398 * If the LLD is capable of seq offload, send down seq_blen
399 * bytes of data in a single frame; otherwise send multiple
400 * frames of the max FC payload supported by the target port.
401 *
402 * Returns : 0 for success.
403 */
404static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
405 size_t offset, size_t seq_blen)
406{
407 struct fc_exch *ep;
408 struct scsi_cmnd *sc;
409 struct scatterlist *sg;
410 struct fc_frame *fp = NULL;
411 struct fc_lport *lp = fsp->lp;
412 size_t remaining;
413 size_t t_blen;
414 size_t tlen;
415 size_t sg_bytes;
416 size_t frame_offset, fh_parm_offset;
417 int error;
418 void *data = NULL;
419 void *page_addr;
420 int using_sg = lp->sg_supp;
421 u32 f_ctl;
422
423 WARN_ON(seq_blen <= 0);
424 if (unlikely(offset + seq_blen > fsp->data_len)) {
425 /* this should never happen */
426 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
427 seq_blen, offset);
428 fc_fcp_send_abort(fsp);
429 return 0;
430 } else if (offset != fsp->xfer_len) {
431 /* Out of Order Data Request - no problem, but unexpected. */
432 FC_DEBUG_FCP("xfer-ready non-contiguous. "
433 "seq_blen %zx offset %zx\n", seq_blen, offset);
434 }
435
436 /*
437	 * If the LLD is capable of seq_offload, set the transport
438	 * burst length (t_blen) to seq_blen; otherwise set t_blen
439	 * to the max FC frame payload previously set in fsp->max_payload.
440 */
441 t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
442 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
443 if (t_blen > 512)
444 t_blen &= ~(512 - 1); /* round down to block size */
445 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
446 sc = fsp->cmd;
447
448 remaining = seq_blen;
449 fh_parm_offset = frame_offset = offset;
450 tlen = 0;
451 seq = lp->tt.seq_start_next(seq);
452 f_ctl = FC_FC_REL_OFF;
453 WARN_ON(!seq);
454
455 /*
456 * If a get_page()/put_page() will fail, don't use sg lists
457 * in the fc_frame structure.
458 *
459 * The put_page() may be long after the I/O has completed
460 * in the case of FCoE, since the network driver does it
461 * via free_skb(). See the test in free_pages_check().
462 *
463 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
464 */
465 if (using_sg) {
466 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
467 if (page_count(sg_page(sg)) == 0 ||
468 (sg_page(sg)->flags & (1 << PG_lru |
469 1 << PG_private |
470 1 << PG_locked |
471 1 << PG_active |
472 1 << PG_slab |
473 1 << PG_swapcache |
474 1 << PG_writeback |
475 1 << PG_reserved |
476 1 << PG_buddy))) {
477 using_sg = 0;
478 break;
479 }
480 }
481 }
482 sg = scsi_sglist(sc);
483
484 while (remaining > 0 && sg) {
485 if (offset >= sg->length) {
486 offset -= sg->length;
487 sg = sg_next(sg);
488 continue;
489 }
490 if (!fp) {
491 tlen = min(t_blen, remaining);
492
493 /*
494 * TODO. Temporary workaround. fc_seq_send() can't
495 * handle odd lengths in non-linear skbs.
496 * This will be the final fragment only.
497 */
498 if (tlen % 4)
499 using_sg = 0;
500 if (using_sg) {
501 fp = _fc_frame_alloc(lp, 0);
502 if (!fp)
503 return -ENOMEM;
504 } else {
505 fp = fc_frame_alloc(lp, tlen);
506 if (!fp)
507 return -ENOMEM;
508
509 data = (void *)(fr_hdr(fp)) +
510 sizeof(struct fc_frame_header);
511 }
512 fh_parm_offset = frame_offset;
513 fr_max_payload(fp) = fsp->max_payload;
514 }
515 sg_bytes = min(tlen, sg->length - offset);
516 if (using_sg) {
517 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
518 FC_FRAME_SG_LEN);
519 get_page(sg_page(sg));
520 skb_fill_page_desc(fp_skb(fp),
521 skb_shinfo(fp_skb(fp))->nr_frags,
522 sg_page(sg), sg->offset + offset,
523 sg_bytes);
524 fp_skb(fp)->data_len += sg_bytes;
525 fr_len(fp) += sg_bytes;
526 fp_skb(fp)->truesize += PAGE_SIZE;
527 } else {
528 size_t off = offset + sg->offset;
529
530 /*
531 * The scatterlist item may be bigger than PAGE_SIZE,
532 * but we must not cross pages inside the kmap.
533 */
534 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
535 (off & ~PAGE_MASK)));
536 page_addr = kmap_atomic(sg_page(sg) +
537 (off >> PAGE_SHIFT),
538 KM_SOFTIRQ0);
539 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
540 sg_bytes);
541 kunmap_atomic(page_addr, KM_SOFTIRQ0);
542 data += sg_bytes;
543 }
544 offset += sg_bytes;
545 frame_offset += sg_bytes;
546 tlen -= sg_bytes;
547 remaining -= sg_bytes;
548
549 if (tlen)
550 continue;
551
552 /*
553		 * If this is the last FCP frame of the sequence, send it
554		 * with transfer of sequence initiative and end-of-sequence.
555 */
556 if (remaining == 0)
557 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
558
559 ep = fc_seq_exch(seq);
560 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
561 FC_TYPE_FCP, f_ctl, fh_parm_offset);
562
563 /*
564 * send the fragment using the sequence.
565 */
566 error = lp->tt.seq_send(lp, seq, fp);
567 if (error) {
568 WARN_ON(1); /* send error should be rare */
569 fc_fcp_retry_cmd(fsp);
570 return 0;
571 }
572 fp = NULL;
573 }
574 fsp->xfer_len += seq_blen; /* premature count? */
575 return 0;
576}
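/*
 * Illustrative user-space sketch of two pieces of arithmetic from
 * fc_fcp_send_data() above: rounding the burst length down to a
 * 512-byte multiple, and clamping a copy so it never crosses a page
 * boundary inside a single kmap.  The EX_* names and the 4 KB page
 * size are assumptions for the example, not part of the driver.
 */
#include <stdio.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	size_t t_blen = 1400;		/* burst length before rounding */
	size_t off = 4000, sg_bytes = 300;

	if (t_blen > 512)
		t_blen &= ~(size_t)(512 - 1);	/* round down to block size */
	printf("t_blen rounded to %zu\n", t_blen);	/* 1024 */

	/* only 96 bytes remain before the 4 KB page boundary */
	if (sg_bytes > EX_PAGE_SIZE - (off & ~EX_PAGE_MASK))
		sg_bytes = EX_PAGE_SIZE - (off & ~EX_PAGE_MASK);
	printf("copy %zu bytes this kmap\n", sg_bytes);	/* 96 */
	return 0;
}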
577
578static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
579{
580 int ba_done = 1;
581 struct fc_ba_rjt *brp;
582 struct fc_frame_header *fh;
583
584 fh = fc_frame_header_get(fp);
585 switch (fh->fh_r_ctl) {
586 case FC_RCTL_BA_ACC:
587 break;
588 case FC_RCTL_BA_RJT:
589 brp = fc_frame_payload_get(fp, sizeof(*brp));
590 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
591 break;
592 /* fall thru */
593 default:
594 /*
595 * We will let the command time out
596 * and scsi-ml recover in this case,
597 * therefore clear the ba_done flag.
598 */
599 ba_done = 0;
600 }
601
602 if (ba_done) {
603 fsp->state |= FC_SRB_ABORTED;
604 fsp->state &= ~FC_SRB_ABORT_PENDING;
605
606 if (fsp->wait_for_comp)
607 complete(&fsp->tm_done);
608 else
609 fc_fcp_complete_locked(fsp);
610 }
611}
612
613/*
614 * fc_fcp_reduce_can_queue - drop can_queue
615 * @lp: lport to drop queueing for
616 *
617 * If we are getting memory allocation failures, then we may
618 * be trying to execute too many commands. We let the running
619 * commands complete or time out, then try again with a reduced
620 * can_queue. Eventually we will hit the point where we run
621 * entirely on the reserved structs.
622 */
623static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
624{
625 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
626 unsigned long flags;
627 int can_queue;
628
629 spin_lock_irqsave(lp->host->host_lock, flags);
630 if (si->throttled)
631 goto done;
632 si->throttled = 1;
633
634 can_queue = lp->host->can_queue;
635 can_queue >>= 1;
636 if (!can_queue)
637 can_queue = 1;
638 lp->host->can_queue = can_queue;
639 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
640 "Reducing can_queue to %d.\n", can_queue);
641done:
642 spin_unlock_irqrestore(lp->host->host_lock, flags);
643}
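/*
 * Illustrative sketch of the throttle above: can_queue is halved with
 * a floor of one, so repeated allocation failures converge on a
 * single outstanding command.  Standalone example, not driver code.
 */
#include <stdio.h>

int main(void)
{
	int can_queue = 256;

	while (can_queue > 1) {
		can_queue >>= 1;
		if (!can_queue)
			can_queue = 1;	/* never drop to zero */
		printf("can_queue reduced to %d\n", can_queue);
	}
	return 0;
}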
644
645/*
646 * The exchange manager calls this routine to process SCSI
647 * exchanges.
648 *
649 * Return : None
650 * Context : called from soft IRQ context;
651 * must not be called while holding the list lock
652 */
653static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
654{
655 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
656 struct fc_lport *lp;
657 struct fc_frame_header *fh;
658 struct fcp_txrdy *dd;
659 u8 r_ctl;
660 int rc = 0;
661
662 if (IS_ERR(fp))
663 goto errout;
664
665 fh = fc_frame_header_get(fp);
666 r_ctl = fh->fh_r_ctl;
667 lp = fsp->lp;
668
669 if (!(lp->state & LPORT_ST_READY))
670 goto out;
671 if (fc_fcp_lock_pkt(fsp))
672 goto out;
673 fsp->last_pkt_time = jiffies;
674
675 if (fh->fh_type == FC_TYPE_BLS) {
676 fc_fcp_abts_resp(fsp, fp);
677 goto unlock;
678 }
679
680 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
681 goto unlock;
682
683 if (r_ctl == FC_RCTL_DD_DATA_DESC) {
684 /*
685 * received XFER RDY from the target
686 * need to send data to the target
687 */
688 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
689 dd = fc_frame_payload_get(fp, sizeof(*dd));
690 WARN_ON(!dd);
691
692 rc = fc_fcp_send_data(fsp, seq,
693 (size_t) ntohl(dd->ft_data_ro),
694 (size_t) ntohl(dd->ft_burst_len));
695 if (!rc)
696 seq->rec_data = fsp->xfer_len;
697 else if (rc == -ENOMEM)
698 fsp->state |= FC_SRB_NOMEM;
699 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
700 /*
701 * received a DATA frame
702 * next we will copy the data to the system buffer
703 */
704 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
705 fc_fcp_recv_data(fsp, fp);
706 seq->rec_data = fsp->xfer_contig_end;
707 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
708 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
709
710 fc_fcp_resp(fsp, fp);
711 } else {
712 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
713 }
714unlock:
715 fc_fcp_unlock_pkt(fsp);
716out:
717 fc_frame_free(fp);
718errout:
719 if (IS_ERR(fp))
720 fc_fcp_error(fsp, fp);
721 else if (rc == -ENOMEM)
722 fc_fcp_reduce_can_queue(lp);
723}
724
725static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
726{
727 struct fc_frame_header *fh;
728 struct fcp_resp *fc_rp;
729 struct fcp_resp_ext *rp_ex;
730 struct fcp_resp_rsp_info *fc_rp_info;
731 u32 plen;
732 u32 expected_len;
733 u32 respl = 0;
734 u32 snsl = 0;
735 u8 flags = 0;
736
737 plen = fr_len(fp);
738 fh = (struct fc_frame_header *)fr_hdr(fp);
739 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
740 goto len_err;
741 plen -= sizeof(*fh);
742 fc_rp = (struct fcp_resp *)(fh + 1);
743 fsp->cdb_status = fc_rp->fr_status;
744 flags = fc_rp->fr_flags;
745 fsp->scsi_comp_flags = flags;
746 expected_len = fsp->data_len;
747
748 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
749 rp_ex = (void *)(fc_rp + 1);
750 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
751 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
752 goto len_err;
753 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
754 if (flags & FCP_RSP_LEN_VAL) {
755 respl = ntohl(rp_ex->fr_rsp_len);
756 if (respl != sizeof(*fc_rp_info))
757 goto len_err;
758 if (fsp->wait_for_comp) {
759 /* Abuse cdb_status for rsp code */
760 fsp->cdb_status = fc_rp_info->rsp_code;
761 complete(&fsp->tm_done);
762 /*
763 * tmfs will not have any scsi cmd so
764 * exit here
765 */
766 return;
767 } else
768 goto err;
769 }
770 if (flags & FCP_SNS_LEN_VAL) {
771 snsl = ntohl(rp_ex->fr_sns_len);
772 if (snsl > SCSI_SENSE_BUFFERSIZE)
773 snsl = SCSI_SENSE_BUFFERSIZE;
774 memcpy(fsp->cmd->sense_buffer,
775 (char *)fc_rp_info + respl, snsl);
776 }
777 }
778 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
779 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
780 goto len_err;
781 if (flags & FCP_RESID_UNDER) {
782 fsp->scsi_resid = ntohl(rp_ex->fr_resid);
783 /*
784 * The cmnd->underflow is the minimum number of
785 * bytes that must be transferred for this
786 * command. Provided a sense condition is not
787 * present, make sure the actual amount
788 * transferred is at least the underflow value
789 * or fail.
790 */
791 if (!(flags & FCP_SNS_LEN_VAL) &&
792 (fc_rp->fr_status == 0) &&
793 (scsi_bufflen(fsp->cmd) -
794 fsp->scsi_resid) < fsp->cmd->underflow)
795 goto err;
796 expected_len -= fsp->scsi_resid;
797 } else {
798 fsp->status_code = FC_ERROR;
799 }
800 }
801 }
802 fsp->state |= FC_SRB_RCV_STATUS;
803
804 /*
805 * Check for missing or extra data frames.
806 */
807 if (unlikely(fsp->xfer_len != expected_len)) {
808 if (fsp->xfer_len < expected_len) {
809 /*
810 * Some data may be queued locally.
811 * Wait at least one jiffy to see if it is delivered.
812 * If this expires without data, we may do an SRR.
813 */
814 fc_fcp_timer_set(fsp, 2);
815 return;
816 }
817 fsp->status_code = FC_DATA_OVRRUN;
818 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
819 "data len %x\n",
820 fsp->rport->port_id,
821 fsp->xfer_len, expected_len, fsp->data_len);
822 }
823 fc_fcp_complete_locked(fsp);
824 return;
825
826len_err:
827 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
828 flags, fr_len(fp), respl, snsl);
829err:
830 fsp->status_code = FC_ERROR;
831 fc_fcp_complete_locked(fsp);
832}
833
834/**
835 * fc_fcp_complete_locked - complete processing of a fcp packet
836 * @fsp: fcp packet
837 *
838 * This function may sleep if a timer is pending. The packet lock must be
839 * held, and the host lock must not be held.
840 */
841static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
842{
843 struct fc_lport *lp = fsp->lp;
844 struct fc_seq *seq;
845 struct fc_exch *ep;
846 u32 f_ctl;
847
848 if (fsp->state & FC_SRB_ABORT_PENDING)
849 return;
850
851 if (fsp->state & FC_SRB_ABORTED) {
852 if (!fsp->status_code)
853 fsp->status_code = FC_CMD_ABORTED;
854 } else {
855 /*
856 * Test for transport underrun, independent of response
857 * underrun status.
858 */
859 if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
860 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
861 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
862 fsp->status_code = FC_DATA_UNDRUN;
863 fsp->io_status = SUGGEST_RETRY << 24;
864 }
865 }
866
867 seq = fsp->seq_ptr;
868 if (seq) {
869 fsp->seq_ptr = NULL;
870 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
871 struct fc_frame *conf_frame;
872 struct fc_seq *csp;
873
874 csp = lp->tt.seq_start_next(seq);
875 conf_frame = fc_frame_alloc(fsp->lp, 0);
876 if (conf_frame) {
877 f_ctl = FC_FC_SEQ_INIT;
878 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
879 ep = fc_seq_exch(seq);
880 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
881 ep->did, ep->sid,
882 FC_TYPE_FCP, f_ctl, 0);
883 lp->tt.seq_send(lp, csp, conf_frame);
884 }
885 }
886 lp->tt.exch_done(seq);
887 }
888 fc_io_compl(fsp);
889}
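/*
 * Illustrative sketch of the transport-underrun test performed in
 * fc_fcp_complete_locked() above, as a standalone predicate: it is an
 * underrun when less data moved than was requested, unless the
 * target's reported residual fully accounts for the shortfall.  The
 * helper name is hypothetical; the flag value follows the FCP spec.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FCP_RESID_UNDER 0x08	/* resid-under flag, as in the FCP spec */

static bool transport_underrun(uint32_t xfer_len, uint32_t data_len,
			       uint32_t scsi_resid, uint8_t comp_flags,
			       uint32_t io_status)
{
	return xfer_len < data_len && !io_status &&
	       (!(comp_flags & EX_FCP_RESID_UNDER) ||
		xfer_len < data_len - scsi_resid);
}

int main(void)
{
	/* 4 KB requested, 2 KB moved, no residual reported: underrun */
	printf("%d\n", transport_underrun(2048, 4096, 0, 0, 0));
	/* same transfer, but a 2 KB residual explains it: no underrun */
	printf("%d\n", transport_underrun(2048, 4096, 2048,
					  EX_FCP_RESID_UNDER, 0));
	return 0;
}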
890
891static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
892{
893 struct fc_lport *lp = fsp->lp;
894
895 if (fsp->seq_ptr) {
896 lp->tt.exch_done(fsp->seq_ptr);
897 fsp->seq_ptr = NULL;
898 }
899 fsp->status_code = error;
900}
901
902/**
903 * fc_fcp_cleanup_each_cmd - run fn on each active command
904 * @lp: logical port
905 * @id: target id
906 * @lun: lun
907 * @error: fsp status code
908 *
909 * If lun or id is -1, they are ignored.
910 */
911static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
912 unsigned int lun, int error)
913{
914 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
915 struct fc_fcp_pkt *fsp;
916 struct scsi_cmnd *sc_cmd;
917 unsigned long flags;
918
919 spin_lock_irqsave(lp->host->host_lock, flags);
920restart:
921 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
922 sc_cmd = fsp->cmd;
923 if (id != -1 && scmd_id(sc_cmd) != id)
924 continue;
925
926 if (lun != -1 && sc_cmd->device->lun != lun)
927 continue;
928
929 fc_fcp_pkt_hold(fsp);
930 spin_unlock_irqrestore(lp->host->host_lock, flags);
931
932 if (!fc_fcp_lock_pkt(fsp)) {
933 fc_fcp_cleanup_cmd(fsp, error);
934 fc_io_compl(fsp);
935 fc_fcp_unlock_pkt(fsp);
936 }
937
938 fc_fcp_pkt_release(fsp);
939 spin_lock_irqsave(lp->host->host_lock, flags);
940 /*
941 * while we dropped the lock multiple pkts could
942 * have been released, so we have to start over.
943 */
944 goto restart;
945 }
946 spin_unlock_irqrestore(lp->host->host_lock, flags);
947}
948
949static void fc_fcp_abort_io(struct fc_lport *lp)
950{
951 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
952}
953
954/**
955 * fc_fcp_pkt_send - send a fcp packet to the lower level.
956 * @lp: fc lport
957 * @fsp: fc packet.
958 *
959 * This is called by the upper layer protocol.
960 * Return : zero for success and -1 for failure
961 * Context : called from queuecommand which can be called from process
962 * or scsi soft irq.
963 * Locks : called with the host lock and irqs disabled.
964 */
965static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
966{
967 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
968 int rc;
969
970 fsp->cmd->SCp.ptr = (char *)fsp;
971 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
972 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
973
974 int_to_scsilun(fsp->cmd->device->lun,
975 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
976 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
977 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
978
979 spin_unlock_irq(lp->host->host_lock);
980 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
981 spin_lock_irq(lp->host->host_lock);
982 if (rc)
983 list_del(&fsp->list);
984
985 return rc;
986}
987
988static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
989 void (*resp)(struct fc_seq *,
990 struct fc_frame *fp,
991 void *arg))
992{
993 struct fc_frame *fp;
994 struct fc_seq *seq;
995 struct fc_rport *rport;
996 struct fc_rport_libfc_priv *rp;
997 const size_t len = sizeof(fsp->cdb_cmd);
998 int rc = 0;
999
1000 if (fc_fcp_lock_pkt(fsp))
1001 return 0;
1002
1003 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
1004 if (!fp) {
1005 rc = -1;
1006 goto unlock;
1007 }
1008
1009 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1010 fr_cmd(fp) = fsp->cmd;
1011 rport = fsp->rport;
1012 fsp->max_payload = rport->maxframe_size;
1013 rp = rport->dd_data;
1014
1015 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1016 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1017 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1018
1019 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
1020 if (!seq) {
1021 fc_frame_free(fp);
1022 rc = -1;
1023 goto unlock;
1024 }
1025 fsp->last_pkt_time = jiffies;
1026 fsp->seq_ptr = seq;
1027 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1028
1029 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1030 fc_fcp_timer_set(fsp,
1031 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
1032 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
1033unlock:
1034 fc_fcp_unlock_pkt(fsp);
1035 return rc;
1036}
1037
1038/*
1039 * transport error handler
1040 */
1041static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1042{
1043 int error = PTR_ERR(fp);
1044
1045 if (fc_fcp_lock_pkt(fsp))
1046 return;
1047
1048 switch (error) {
1049 case -FC_EX_CLOSED:
1050 fc_fcp_retry_cmd(fsp);
1051 goto unlock;
1052 default:
1053 FC_DBG("unknown error %ld\n", PTR_ERR(fp));
1054 }
1055 /*
1056 * clear abort pending, because the lower layer
1057 * decided to force completion.
1058 */
1059 fsp->state &= ~FC_SRB_ABORT_PENDING;
1060 fsp->status_code = FC_CMD_PLOGO;
1061 fc_fcp_complete_locked(fsp);
1062unlock:
1063 fc_fcp_unlock_pkt(fsp);
1064}
1065
1066/*
1067 * SCSI abort handler: sends an abort
1068 * and then waits for abort completion.
1069 */
1070static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1071{
1072 int rc = FAILED;
1073
1074 if (fc_fcp_send_abort(fsp))
1075 return FAILED;
1076
1077 init_completion(&fsp->tm_done);
1078 fsp->wait_for_comp = 1;
1079
1080 spin_unlock_bh(&fsp->scsi_pkt_lock);
1081 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1082 spin_lock_bh(&fsp->scsi_pkt_lock);
1083 fsp->wait_for_comp = 0;
1084
1085 if (!rc) {
1086 FC_DBG("target abort cmd failed\n");
1087 rc = FAILED;
1088 } else if (fsp->state & FC_SRB_ABORTED) {
1089 FC_DBG("target abort cmd passed\n");
1090 rc = SUCCESS;
1091 fc_fcp_complete_locked(fsp);
1092 }
1093
1094 return rc;
1095}
1096
1097/*
1098 * Retry LUN reset after resource allocation failed.
1099 */
1100static void fc_lun_reset_send(unsigned long data)
1101{
1102 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1103 struct fc_lport *lp = fsp->lp;
1104 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
1105 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1106 return;
1107 if (fc_fcp_lock_pkt(fsp))
1108 return;
1109 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1110 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1111 fc_fcp_unlock_pkt(fsp);
1112 }
1113}
1114
1115/*
1116 * Scsi device reset handler- send a LUN RESET to the device
1117 * and wait for reset reply
1118 */
1119static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1120 unsigned int id, unsigned int lun)
1121{
1122 int rc;
1123
1124 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
1125 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
1126 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1127
1128 fsp->wait_for_comp = 1;
1129 init_completion(&fsp->tm_done);
1130
1131 fc_lun_reset_send((unsigned long)fsp);
1132
1133 /*
1134 * wait for completion of reset
1135 * after that make sure all commands are terminated
1136 */
1137 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1138
1139 spin_lock_bh(&fsp->scsi_pkt_lock);
1140 fsp->state |= FC_SRB_COMPL;
1141 spin_unlock_bh(&fsp->scsi_pkt_lock);
1142
1143 del_timer_sync(&fsp->timer);
1144
1145 spin_lock_bh(&fsp->scsi_pkt_lock);
1146 if (fsp->seq_ptr) {
1147 lp->tt.exch_done(fsp->seq_ptr);
1148 fsp->seq_ptr = NULL;
1149 }
1150 fsp->wait_for_comp = 0;
1151 spin_unlock_bh(&fsp->scsi_pkt_lock);
1152
1153 if (!rc) {
1154 FC_DBG("lun reset failed\n");
1155 return FAILED;
1156 }
1157
1158 /* cdb_status holds the tmf's rsp code */
1159 if (fsp->cdb_status != FCP_TMF_CMPL)
1160 return FAILED;
1161
1162 FC_DBG("lun reset to lun %u completed\n", lun);
1163 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
1164 return SUCCESS;
1165}
1166
1167/*
1168 * Task Management response handler
1169 */
1170static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1171{
1172 struct fc_fcp_pkt *fsp = arg;
1173 struct fc_frame_header *fh;
1174
1175 if (IS_ERR(fp)) {
1176 /*
1177 * If there is an error, just let it time out or wait
1178 * for the TMF to be aborted if it timed out.
1179 *
1180 * scsi-eh will escalate when either happens.
1181 */
1182 return;
1183 }
1184
1185 if (fc_fcp_lock_pkt(fsp))
1186 return;
1187
1188 /*
1189 * raced with eh timeout handler.
1190 */
1191 if (!fsp->seq_ptr || !fsp->wait_for_comp) {
1192 spin_unlock_bh(&fsp->scsi_pkt_lock);
1193 return;
1194 }
1195
1196 fh = fc_frame_header_get(fp);
1197 if (fh->fh_type != FC_TYPE_BLS)
1198 fc_fcp_resp(fsp, fp);
1199 fsp->seq_ptr = NULL;
1200 fsp->lp->tt.exch_done(seq);
1201 fc_frame_free(fp);
1202 fc_fcp_unlock_pkt(fsp);
1203}
1204
1205static void fc_fcp_cleanup(struct fc_lport *lp)
1206{
1207 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
1208}
1209
1210/*
1211 * fc_fcp_timeout: called by OS timer function.
1212 *
1213 * The timer has been deactivated and must be reactivated if desired
1214 * using fc_fcp_timer_set().
1215 *
1216 * Algorithm:
1217 *
1218 * If REC is supported, just issue it, and return. The REC exchange will
1219 * complete or time out, and recovery can continue at that point.
1220 *
1221 * Otherwise, if the response has been received without all the data,
1222 * ER_TIMEOUT has elapsed since the response was received.
1223 *
1224 * If the response has not been received,
1225 * we see if data was received recently. If it has been, we continue waiting,
1226 * otherwise, we abort the command.
1227 */
1228static void fc_fcp_timeout(unsigned long data)
1229{
1230 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1231 struct fc_rport *rport = fsp->rport;
1232 struct fc_rport_libfc_priv *rp = rport->dd_data;
1233
1234 if (fc_fcp_lock_pkt(fsp))
1235 return;
1236
1237 if (fsp->cdb_cmd.fc_tm_flags)
1238 goto unlock;
1239
1240 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1241
1242 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
1243 fc_fcp_rec(fsp);
1244 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1245 jiffies))
1246 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1247 else if (fsp->state & FC_SRB_RCV_STATUS)
1248 fc_fcp_complete_locked(fsp);
1249 else
1250 fc_timeout_error(fsp);
1251 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1252unlock:
1253 fc_fcp_unlock_pkt(fsp);
1254}
1255
1256/*
1257 * Send a REC ELS request
1258 */
1259static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1260{
1261 struct fc_lport *lp;
1262 struct fc_frame *fp;
1263 struct fc_rport *rport;
1264 struct fc_rport_libfc_priv *rp;
1265
1266 lp = fsp->lp;
1267 rport = fsp->rport;
1268 rp = rport->dd_data;
1269 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
1270 fsp->status_code = FC_HRD_ERROR;
1271 fsp->io_status = SUGGEST_RETRY << 24;
1272 fc_fcp_complete_locked(fsp);
1273 return;
1274 }
1275 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
1276 if (!fp)
1277 goto retry;
1278
1279 fr_seq(fp) = fsp->seq_ptr;
1280 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1281 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
1282 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1283 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
1284 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1285 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1286 return;
1287 }
1288 fc_frame_free(fp);
1289retry:
1290 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1291 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1292 else
1293 fc_timeout_error(fsp);
1294}
1295
1296/*
1297 * Receive handler for REC ELS frame.
1298 * If it is a reject, let the scsi layer handle
1299 * the timeout. If it is an LS_ACC and the I/O was not completed,
1300 * set the timeout and return; otherwise complete the exchange
1301 * and tell the scsi layer to restart the I/O.
1302 */
1303static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1304{
1305 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
1306 struct fc_els_rec_acc *recp;
1307 struct fc_els_ls_rjt *rjt;
1308 u32 e_stat;
1309 u8 opcode;
1310 u32 offset;
1311 enum dma_data_direction data_dir;
1312 enum fc_rctl r_ctl;
1313 struct fc_rport_libfc_priv *rp;
1314
1315 if (IS_ERR(fp)) {
1316 fc_fcp_rec_error(fsp, fp);
1317 return;
1318 }
1319
1320 if (fc_fcp_lock_pkt(fsp))
1321 goto out;
1322
1323 fsp->recov_retry = 0;
1324 opcode = fc_frame_payload_op(fp);
1325 if (opcode == ELS_LS_RJT) {
1326 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1327 switch (rjt->er_reason) {
1328 default:
1329 FC_DEBUG_FCP("device %x unexpected REC reject "
1330 "reason %d expl %d\n",
1331 fsp->rport->port_id, rjt->er_reason,
1332 rjt->er_explan);
1333 /* fall through */
1334 case ELS_RJT_UNSUP:
1335 FC_DEBUG_FCP("device does not support REC\n");
1336 rp = fsp->rport->dd_data;
1337 /*
1338 * If we do not support RECs or got some bogus
1339 * reason, re-set the timer so we check whether
1340 * we are making progress.
1341 */
1342 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1343 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1344 break;
1345 case ELS_RJT_LOGIC:
1346 case ELS_RJT_UNAB:
1347 /*
1348 * If no data transfer, the command frame got dropped
1349 * so we just retry. If data was transferred, we
1350 * lost the response but the target has no record,
1351 * so we abort and retry.
1352 */
1353 if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
1354 fsp->xfer_len == 0) {
1355 fc_fcp_retry_cmd(fsp);
1356 break;
1357 }
1358 fc_timeout_error(fsp);
1359 break;
1360 }
1361 } else if (opcode == ELS_LS_ACC) {
1362 if (fsp->state & FC_SRB_ABORTED)
1363 goto unlock_out;
1364
1365 data_dir = fsp->cmd->sc_data_direction;
1366 recp = fc_frame_payload_get(fp, sizeof(*recp));
1367 offset = ntohl(recp->reca_fc4value);
1368 e_stat = ntohl(recp->reca_e_stat);
1369
1370 if (e_stat & ESB_ST_COMPLETE) {
1371
1372 /*
1373 * The exchange is complete.
1374 *
1375 * For output, we must've lost the response.
1376 * For input, all data must've been sent.
1377 * We may have lost the response
1378 * (and a confirmation was requested) and maybe
1379 * some data.
1380 *
1381 * If all data received, send SRR
1382 * asking for response. If partial data received,
1383 * or gaps, SRR requests data at start of gap.
1384 * Recovery via SRR relies on in-order-delivery.
1385 */
1386 if (data_dir == DMA_TO_DEVICE) {
1387 r_ctl = FC_RCTL_DD_CMD_STATUS;
1388 } else if (fsp->xfer_contig_end == offset) {
1389 r_ctl = FC_RCTL_DD_CMD_STATUS;
1390 } else {
1391 offset = fsp->xfer_contig_end;
1392 r_ctl = FC_RCTL_DD_SOL_DATA;
1393 }
1394 fc_fcp_srr(fsp, r_ctl, offset);
1395 } else if (e_stat & ESB_ST_SEQ_INIT) {
1396
1397 /*
1398 * The remote port has the initiative, so just
1399 * keep waiting for it to complete.
1400 */
1401 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1402 } else {
1403
1404 /*
1405 * The exchange is incomplete, we have seq. initiative.
1406 * Lost response with requested confirmation,
1407 * lost confirmation, lost transfer ready or
1408 * lost write data.
1409 *
1410 * For output, if not all data was received, ask
1411 * for transfer ready to be repeated.
1412 *
1413 * If we received or sent all the data, send SRR to
1414 * request response.
1415 *
1416 * If we lost a response, we may have lost some read
1417 * data as well.
1418 */
1419 r_ctl = FC_RCTL_DD_SOL_DATA;
1420 if (data_dir == DMA_TO_DEVICE) {
1421 r_ctl = FC_RCTL_DD_CMD_STATUS;
1422 if (offset < fsp->data_len)
1423 r_ctl = FC_RCTL_DD_DATA_DESC;
1424 } else if (offset == fsp->xfer_contig_end) {
1425 r_ctl = FC_RCTL_DD_CMD_STATUS;
1426 } else if (fsp->xfer_contig_end < offset) {
1427 offset = fsp->xfer_contig_end;
1428 }
1429 fc_fcp_srr(fsp, r_ctl, offset);
1430 }
1431 }
1432unlock_out:
1433 fc_fcp_unlock_pkt(fsp);
1434out:
1435 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1436 fc_frame_free(fp);
1437}
1438
1439/*
1440 * Handle error response or timeout for REC exchange.
1441 */
1442static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1443{
1444 int error = PTR_ERR(fp);
1445
1446 if (fc_fcp_lock_pkt(fsp))
1447 goto out;
1448
1449 switch (error) {
1450 case -FC_EX_CLOSED:
1451 fc_fcp_retry_cmd(fsp);
1452 break;
1453
1454 default:
1455 FC_DBG("REC %p fid %x unexpected error %d\n",
1456 fsp, fsp->rport->port_id, error);
1457 fsp->status_code = FC_CMD_PLOGO;
1458 /* fall through */
1459
1460 case -FC_EX_TIMEOUT:
1461 /*
1462 * Assume REC or LS_ACC was lost.
1463 * The exchange manager will have aborted REC, so retry.
1464 */
1465 FC_DBG("REC fid %x error %d retry %d/%d\n",
1466 fsp->rport->port_id, error, fsp->recov_retry,
1467 FC_MAX_RECOV_RETRY);
1468 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1469 fc_fcp_rec(fsp);
1470 else
1471 fc_timeout_error(fsp);
1472 break;
1473 }
1474 fc_fcp_unlock_pkt(fsp);
1475out:
1476 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1477}
1478
1479/*
1480 * Timeout error routine:
1481 * aborts the I/O, closes the exchange and
1482 * sends a completion notification to the scsi layer
1483 */
1484static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1485{
1486 fsp->status_code = FC_CMD_TIME_OUT;
1487 fsp->cdb_status = 0;
1488 fsp->io_status = 0;
1489 /*
1490 * if this fails then we let the scsi command timer fire and
1491 * scsi-ml escalate.
1492 */
1493 fc_fcp_send_abort(fsp);
1494}
1495
1496/*
1497 * Sequence retransmission request.
1498 * This is called after receiving status but insufficient data, or
1499 * when expecting status but the request has timed out.
1500 */
1501static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1502{
1503 struct fc_lport *lp = fsp->lp;
1504 struct fc_rport *rport;
1505 struct fc_rport_libfc_priv *rp;
1506 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1507 struct fc_seq *seq;
1508 struct fcp_srr *srr;
1509 struct fc_frame *fp;
1510 u8 cdb_op;
1511
1512 rport = fsp->rport;
1513 rp = rport->dd_data;
1514 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1515
1516 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
1517 goto retry; /* shouldn't happen */
1518 fp = fc_frame_alloc(lp, sizeof(*srr));
1519 if (!fp)
1520 goto retry;
1521
1522 srr = fc_frame_payload_get(fp, sizeof(*srr));
1523 memset(srr, 0, sizeof(*srr));
1524 srr->srr_op = ELS_SRR;
1525 srr->srr_ox_id = htons(ep->oxid);
1526 srr->srr_rx_id = htons(ep->rxid);
1527 srr->srr_r_ctl = r_ctl;
1528 srr->srr_rel_off = htonl(offset);
1529
1530 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1531 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1532 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1533
1534 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
1535 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1536 if (!seq) {
1537 fc_frame_free(fp);
1538 goto retry;
1539 }
1540 fsp->recov_seq = seq;
1541 fsp->xfer_len = offset;
1542 fsp->xfer_contig_end = offset;
1543 fsp->state &= ~FC_SRB_RCV_STATUS;
1544 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
1545 return;
1546retry:
1547 fc_fcp_retry_cmd(fsp);
1548}
1549
1550/*
1551 * Handle response from SRR.
1552 */
1553static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1554{
1555 struct fc_fcp_pkt *fsp = arg;
1556 struct fc_frame_header *fh;
1557
1558 if (IS_ERR(fp)) {
1559 fc_fcp_srr_error(fsp, fp);
1560 return;
1561 }
1562
1563 if (fc_fcp_lock_pkt(fsp))
1564 goto out;
1565
1566 fh = fc_frame_header_get(fp);
1567 /*
1568 * BUG? fc_fcp_srr_error calls exch_done which would release
1569 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
1570 * then fc_exch_timeout would be sending an abort. The exch_done
1571 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
1572 * an abort response though.
1573 */
1574 if (fh->fh_type == FC_TYPE_BLS) {
1575 fc_fcp_unlock_pkt(fsp);
1576 return;
1577 }
1578
1579 fsp->recov_seq = NULL;
1580 switch (fc_frame_payload_op(fp)) {
1581 case ELS_LS_ACC:
1582 fsp->recov_retry = 0;
1583 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1584 break;
1585 case ELS_LS_RJT:
1586 default:
1587 fc_timeout_error(fsp);
1588 break;
1589 }
1590 fc_fcp_unlock_pkt(fsp);
1591 fsp->lp->tt.exch_done(seq);
1592out:
1593 fc_frame_free(fp);
1594 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1595}
1596
1597static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1598{
1599 if (fc_fcp_lock_pkt(fsp))
1600 goto out;
1601 fsp->lp->tt.exch_done(fsp->recov_seq);
1602 fsp->recov_seq = NULL;
1603 switch (PTR_ERR(fp)) {
1604 case -FC_EX_TIMEOUT:
1605 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1606 fc_fcp_rec(fsp);
1607 else
1608 fc_timeout_error(fsp);
1609 break;
1610 case -FC_EX_CLOSED: /* e.g., link failure */
1611 /* fall through */
1612 default:
1613 fc_fcp_retry_cmd(fsp);
1614 break;
1615 }
1616 fc_fcp_unlock_pkt(fsp);
1617out:
1618 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1619}
1620
1621static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
1622{
1623 /* lock ? */
1624 return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
1625}
1626
1627/**
1628 * fc_queuecommand - The queuecommand function of the scsi template
1629 * @cmd: struct scsi_cmnd to be executed
1630 * @done: Callback function to be called when cmd is completed
1631 *
1632 * This is the I/O strategy routine, called by the scsi layer.
1633 * This routine is called with the host_lock held.
1634 */
1635int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1636{
1637 struct fc_lport *lp;
1638 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1639 struct fc_fcp_pkt *fsp;
1640 struct fc_rport_libfc_priv *rp;
1641 int rval;
1642 int rc = 0;
1643 struct fcoe_dev_stats *stats;
1644
1645 lp = shost_priv(sc_cmd->device->host);
1646
1647 rval = fc_remote_port_chkready(rport);
1648 if (rval) {
1649 sc_cmd->result = rval;
1650 done(sc_cmd);
1651 goto out;
1652 }
1653
1654 if (!*(struct fc_remote_port **)rport->dd_data) {
1655 /*
1656 * rport is transitioning from blocked/deleted to
1657 * online
1658 */
1659 sc_cmd->result = DID_IMM_RETRY << 16;
1660 done(sc_cmd);
1661 goto out;
1662 }
1663
1664 rp = rport->dd_data;
1665
1666 if (!fc_fcp_lport_queue_ready(lp)) {
1667 rc = SCSI_MLQUEUE_HOST_BUSY;
1668 goto out;
1669 }
1670
1671 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1672 if (fsp == NULL) {
1673 rc = SCSI_MLQUEUE_HOST_BUSY;
1674 goto out;
1675 }
1676
1677 /*
1678 * build the libfc request pkt
1679 */
1680 fsp->cmd = sc_cmd; /* save the cmd */
1681 fsp->lp = lp; /* save the softc ptr */
1682 fsp->rport = rport; /* set the remote port ptr */
1683 sc_cmd->scsi_done = done;
1684
1685 /*
1686 * set up the transfer length
1687 */
1688 fsp->data_len = scsi_bufflen(sc_cmd);
1689 fsp->xfer_len = 0;
1690
1691 /*
1692 * setup the data direction
1693 */
1694 stats = lp->dev_stats[smp_processor_id()];
1695 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1696 fsp->req_flags = FC_SRB_READ;
1697 stats->InputRequests++;
1698 stats->InputMegabytes += fsp->data_len;
1699 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1700 fsp->req_flags = FC_SRB_WRITE;
1701 stats->OutputRequests++;
1702 stats->OutputMegabytes += fsp->data_len;
1703 } else {
1704 fsp->req_flags = 0;
1705 stats->ControlRequests++;
1706 }
1707
1708 fsp->tgt_flags = rp->flags;
1709
1710 init_timer(&fsp->timer);
1711 fsp->timer.data = (unsigned long)fsp;
1712
1713 /*
1714 * Send it to the lower layer.
1715 * If we get a -1 return, put the request in the pending
1716 * queue.
1717 */
1718 rval = fc_fcp_pkt_send(lp, fsp);
1719 if (rval != 0) {
1720 fsp->state = FC_SRB_FREE;
1721 fc_fcp_pkt_release(fsp);
1722 rc = SCSI_MLQUEUE_HOST_BUSY;
1723 }
1724out:
1725 return rc;
1726}
1727EXPORT_SYMBOL(fc_queuecommand);
1728
1729/**
1730 * fc_io_compl - Handle responses for completed commands
1731 * @fsp: scsi packet
1732 *
1733 * Translates an error to a Linux SCSI error.
1734 *
1735 * The fcp packet lock must be held when calling.
1736 */
1737static void fc_io_compl(struct fc_fcp_pkt *fsp)
1738{
1739 struct fc_fcp_internal *si;
1740 struct scsi_cmnd *sc_cmd;
1741 struct fc_lport *lp;
1742 unsigned long flags;
1743
1744 fsp->state |= FC_SRB_COMPL;
1745 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1746 spin_unlock_bh(&fsp->scsi_pkt_lock);
1747 del_timer_sync(&fsp->timer);
1748 spin_lock_bh(&fsp->scsi_pkt_lock);
1749 }
1750
1751 lp = fsp->lp;
1752 si = fc_get_scsi_internal(lp);
1753 spin_lock_irqsave(lp->host->host_lock, flags);
1754 if (!fsp->cmd) {
1755 spin_unlock_irqrestore(lp->host->host_lock, flags);
1756 return;
1757 }
1758
1759 /*
1760 * If a command timed out while we had to try and throttle IO
1761 * and it is now getting cleaned up, then we are about to
1762 * try again, so clear the throttled flag in case we get more
1763 * timeouts.
1764 */
1765 if (si->throttled && fsp->state & FC_SRB_NOMEM)
1766 si->throttled = 0;
1767
1768 sc_cmd = fsp->cmd;
1769 fsp->cmd = NULL;
1770
1771 if (!sc_cmd->SCp.ptr) {
1772 spin_unlock_irqrestore(lp->host->host_lock, flags);
1773 return;
1774 }
1775
1776 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1777 switch (fsp->status_code) {
1778 case FC_COMPLETE:
1779 if (fsp->cdb_status == 0) {
1780 /*
1781 * good I/O status
1782 */
1783 sc_cmd->result = DID_OK << 16;
1784 if (fsp->scsi_resid)
1785 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1786 } else if (fsp->cdb_status == QUEUE_FULL) {
1787 struct scsi_device *tmp_sdev;
1788 struct scsi_device *sdev = sc_cmd->device;
1789
1790 shost_for_each_device(tmp_sdev, sdev->host) {
1791 if (tmp_sdev->id != sdev->id)
1792 continue;
1793
1794 if (tmp_sdev->queue_depth > 1) {
1795 scsi_track_queue_full(tmp_sdev,
1796 tmp_sdev->
1797 queue_depth - 1);
1798 }
1799 }
1800 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1801 } else {
1802 /*
1803 * transport level I/O was ok but scsi
1804 * has non zero status
1805 */
1806 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1807 }
1808 break;
1809 case FC_ERROR:
1810 sc_cmd->result = DID_ERROR << 16;
1811 break;
1812 case FC_DATA_UNDRUN:
1813 if (fsp->cdb_status == 0) {
1814 /*
1815 * SCSI status is good but there was a transport-level
1816 * underrun. For a read, should this be an error??
1817 */
1818 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1819 } else {
1820 /*
1821 * SCSI got an underrun; this is an error
1822 */
1823 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1824 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1825 }
1826 break;
1827 case FC_DATA_OVRRUN:
1828 /*
1829 * overrun is an error
1830 */
1831 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1832 break;
1833 case FC_CMD_ABORTED:
1834 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
1835 break;
1836 case FC_CMD_TIME_OUT:
1837 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1838 break;
1839 case FC_CMD_RESET:
1840 sc_cmd->result = (DID_RESET << 16);
1841 break;
1842 case FC_HRD_ERROR:
1843 sc_cmd->result = (DID_NO_CONNECT << 16);
1844 break;
1845 default:
1846 sc_cmd->result = (DID_ERROR << 16);
1847 break;
1848 }
1849
1850 list_del(&fsp->list);
1851 sc_cmd->SCp.ptr = NULL;
1852 sc_cmd->scsi_done(sc_cmd);
1853 spin_unlock_irqrestore(lp->host->host_lock, flags);
1854
1855 /* release ref from initial allocation in queue command */
1856 fc_fcp_pkt_release(fsp);
1857}
1858
1859/**
1860 * fc_fcp_complete - complete processing of a fcp packet
1861 * @fsp: fcp packet
1862 *
1863 * This function may sleep if a fsp timer is pending.
1864 * The host lock must not be held by caller.
1865 */
1866void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1867{
1868 if (fc_fcp_lock_pkt(fsp))
1869 return;
1870
1871 fc_fcp_complete_locked(fsp);
1872 fc_fcp_unlock_pkt(fsp);
1873}
1874EXPORT_SYMBOL(fc_fcp_complete);
1875
1876/**
1877 * fc_eh_abort - Abort a command...from scsi host template
1878 * @sc_cmd: scsi command to abort
1879 *
1880 * Sends an ABTS to the target device and waits for the response.
1881 * sc_cmd is the pointer to the command to be aborted.
1882 */
1883int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1884{
1885 struct fc_fcp_pkt *fsp;
1886 struct fc_lport *lp;
1887 int rc = FAILED;
1888 unsigned long flags;
1889
1890 lp = shost_priv(sc_cmd->device->host);
1891 if (lp->state != LPORT_ST_READY)
1892 return rc;
1893 else if (!(lp->link_status & FC_LINK_UP))
1894 return rc;
1895
1896 spin_lock_irqsave(lp->host->host_lock, flags);
1897 fsp = CMD_SP(sc_cmd);
1898 if (!fsp) {
1899 /* command completed while scsi eh was setting up */
1900 spin_unlock_irqrestore(lp->host->host_lock, flags);
1901 return SUCCESS;
1902 }
1903 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
1904 fc_fcp_pkt_hold(fsp);
1905 spin_unlock_irqrestore(lp->host->host_lock, flags);
1906
1907 if (fc_fcp_lock_pkt(fsp)) {
1908 /* completed while we were waiting for timer to be deleted */
1909 rc = SUCCESS;
1910 goto release_pkt;
1911 }
1912
1913 rc = fc_fcp_pkt_abort(lp, fsp);
1914 fc_fcp_unlock_pkt(fsp);
1915
1916release_pkt:
1917 fc_fcp_pkt_release(fsp);
1918 return rc;
1919}
1920EXPORT_SYMBOL(fc_eh_abort);
1921
1922/**
1923 * fc_eh_device_reset - Reset a single LUN
1924 * @sc_cmd: scsi command
1925 *
1926 * Set from the scsi host template to send a TM command to the target and
1927 * wait for the response.
1928 */
1929int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1930{
1931 struct fc_lport *lp;
1932 struct fc_fcp_pkt *fsp;
1933 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1934 int rc = FAILED;
1935 struct fc_rport_libfc_priv *rp;
1936 int rval;
1937
1938 rval = fc_remote_port_chkready(rport);
1939 if (rval)
1940 goto out;
1941
1942 rp = rport->dd_data;
1943 lp = shost_priv(sc_cmd->device->host);
1944
1945 if (lp->state != LPORT_ST_READY)
1946 return rc;
1947
1948 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1949 if (fsp == NULL) {
1950 FC_DBG("could not allocate scsi_pkt\n");
1951 sc_cmd->result = DID_NO_CONNECT << 16;
1952 goto out;
1953 }
1954
1955 /*
1956 * Build the libfc request pkt. Do not set the scsi cmnd, because
1957 * the sc passed in is not setup for execution like when sent
1958 * through the queuecommand callout.
1959 */
1960 fsp->lp = lp; /* save the softc ptr */
1961 fsp->rport = rport; /* set the remote port ptr */
1962
1963 /*
1964 * flush outstanding commands
1965 */
1966 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
1967 fsp->state = FC_SRB_FREE;
1968 fc_fcp_pkt_release(fsp);
1969
1970out:
1971 return rc;
1972}
1973EXPORT_SYMBOL(fc_eh_device_reset);
1974
1975/**
1976 * fc_eh_host_reset - The reset function will reset the ports on the host.
1977 * @sc_cmd: scsi command
1978 */
1979int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
1980{
1981 struct Scsi_Host *shost = sc_cmd->device->host;
1982 struct fc_lport *lp = shost_priv(shost);
1983 unsigned long wait_tmo;
1984
1985 lp->tt.lport_reset(lp);
1986 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
1987 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
1988 msleep(1000);
1989
1990 if (fc_fcp_lport_queue_ready(lp)) {
1991 shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
1992 return SUCCESS;
1993 } else {
1994 shost_printk(KERN_INFO, shost, "Host reset failed. "
1995 "lport not ready.\n");
1996 return FAILED;
1997 }
1998}
1999EXPORT_SYMBOL(fc_eh_host_reset);
2000
2001/**
2002 * fc_slave_alloc - configure queue depth
2003 * @sdev: scsi device
2004 *
2005 * Configures queue depth based on the host's cmd_per_lun. If not set
2006 * then we use the libfc default.
2007 */
2008int fc_slave_alloc(struct scsi_device *sdev)
2009{
2010 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2011 int queue_depth;
2012
2013 if (!rport || fc_remote_port_chkready(rport))
2014 return -ENXIO;
2015
2016 if (sdev->tagged_supported) {
2017 if (sdev->host->hostt->cmd_per_lun)
2018 queue_depth = sdev->host->hostt->cmd_per_lun;
2019 else
2020 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
2021 scsi_activate_tcq(sdev, queue_depth);
2022 }
2023 return 0;
2024}
2025EXPORT_SYMBOL(fc_slave_alloc);
2026
2027int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2028{
2029 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2030 return sdev->queue_depth;
2031}
2032EXPORT_SYMBOL(fc_change_queue_depth);
2033
2034int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2035{
2036 if (sdev->tagged_supported) {
2037 scsi_set_tag_type(sdev, tag_type);
2038 if (tag_type)
2039 scsi_activate_tcq(sdev, sdev->queue_depth);
2040 else
2041 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2042 } else
2043 tag_type = 0;
2044
2045 return tag_type;
2046}
2047EXPORT_SYMBOL(fc_change_queue_type);
2048
2049void fc_fcp_destroy(struct fc_lport *lp)
2050{
2051 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2052
2053 if (!list_empty(&si->scsi_pkt_queue))
2054 printk(KERN_ERR "Leaked scsi packets.\n");
2055
2056 mempool_destroy(si->scsi_pkt_pool);
2057 kfree(si);
2058 lp->scsi_priv = NULL;
2059}
2060EXPORT_SYMBOL(fc_fcp_destroy);
2061
2062int fc_fcp_init(struct fc_lport *lp)
2063{
2064 int rc;
2065 struct fc_fcp_internal *si;
2066
2067 if (!lp->tt.fcp_cmd_send)
2068 lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
2069
2070 if (!lp->tt.fcp_cleanup)
2071 lp->tt.fcp_cleanup = fc_fcp_cleanup;
2072
2073 if (!lp->tt.fcp_abort_io)
2074 lp->tt.fcp_abort_io = fc_fcp_abort_io;
2075
2076 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2077 if (!si)
2078 return -ENOMEM;
2079 lp->scsi_priv = si;
2080 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2081
2082 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2083 if (!si->scsi_pkt_pool) {
2084 rc = -ENOMEM;
2085 goto free_internal;
2086 }
2087 return 0;
2088
2089free_internal:
2090 kfree(si);
2091 return rc;
2092}
2093EXPORT_SYMBOL(fc_fcp_init);
2094
2095static int __init libfc_init(void)
2096{
2097 int rc;
2098
2099 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2100 sizeof(struct fc_fcp_pkt),
2101 0, SLAB_HWCACHE_ALIGN, NULL);
2102 if (scsi_pkt_cachep == NULL) {
2103 FC_DBG("Unable to allocate SRB cache...module load failed!");
2104 return -ENOMEM;
2105 }
2106
2107 rc = fc_setup_exch_mgr();
2108 if (rc)
2109 goto destroy_pkt_cache;
2110
2111 rc = fc_setup_rport();
2112 if (rc)
2113 goto destroy_em;
2114
2115 return rc;
2116destroy_em:
2117 fc_destroy_exch_mgr();
2118destroy_pkt_cache:
2119 kmem_cache_destroy(scsi_pkt_cachep);
2120 return rc;
2121}
2122
2123static void __exit libfc_exit(void)
2124{
2125 kmem_cache_destroy(scsi_pkt_cachep);
2126 fc_destroy_exch_mgr();
2127 fc_destroy_rport();
2128}
2129
2130module_init(libfc_init);
2131module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000000..63fe00cfe667
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Frame allocation.
22 */
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/skbuff.h>
26#include <linux/crc32.h>
27
28#include <scsi/fc_frame.h>
29
30/*
31 * Check the CRC in a frame.
32 */
33u32 fc_frame_crc_check(struct fc_frame *fp)
34{
35 u32 crc;
36 u32 error;
37 const u8 *bp;
38 unsigned int len;
39
40 WARN_ON(!fc_frame_is_linear(fp));
41 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
42 len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
43 bp = (const u8 *) fr_hdr(fp);
44 crc = ~crc32(~0, bp, len);
45 error = crc ^ fr_crc(fp);
46 return error;
47}
48EXPORT_SYMBOL(fc_frame_crc_check);
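/*
 * Illustrative sketch: fc_frame_crc_check() above rounds the frame
 * length up to a 4-byte multiple so the CRC also covers the fill
 * bytes.  The same round-up in isolation:
 */
#include <stdio.h>

int main(void)
{
	unsigned int lens[] = { 60, 61, 62, 63, 64 };
	unsigned int i;

	for (i = 0; i < 5; i++)	/* 60->60, 61->64, 62->64, 63->64, 64->64 */
		printf("%u -> %u\n", lens[i], (lens[i] + 3) & ~3u);
	return 0;
}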
49
50/*
51 * Allocate a frame intended to be sent via fcoe_xmit.
52 * Get an sk_buff for the frame and set the length.
53 */
54struct fc_frame *__fc_frame_alloc(size_t len)
55{
56 struct fc_frame *fp;
57 struct sk_buff *skb;
58
59 WARN_ON((len % sizeof(u32)) != 0);
60 len += sizeof(struct fc_frame_header);
61 skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
62 if (!skb)
63 return NULL;
64 fp = (struct fc_frame *) skb;
65 fc_frame_init(fp);
66 skb_reserve(skb, FC_FRAME_HEADROOM);
67 skb_put(skb, len);
68 return fp;
69}
70EXPORT_SYMBOL(__fc_frame_alloc);
71
72
73struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
74{
75 struct fc_frame *fp;
76 size_t fill;
77
78 fill = payload_len % 4;
79 if (fill != 0)
80 fill = 4 - fill;
81 fp = __fc_frame_alloc(payload_len + fill);
82 if (fp) {
83 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
84 /* trim is OK, we just allocated it so there are no fragments */
85 skb_trim(fp_skb(fp),
86 payload_len + sizeof(struct fc_frame_header));
87 }
88 return fp;
89}
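/*
 * Illustrative sketch of the pad computed by fc_frame_alloc_fill()
 * above: the bytes needed to reach the next 4-byte boundary, zero
 * when the payload is already aligned.  pad4() is a hypothetical name.
 */
#include <stdio.h>

static unsigned int pad4(unsigned int payload_len)
{
	unsigned int fill = payload_len % 4;

	return fill ? 4 - fill : 0;
}

int main(void)
{
	unsigned int n;

	for (n = 8; n <= 12; n++)	/* 8->0, 9->3, 10->2, 11->1, 12->0 */
		printf("payload %u -> pad %u\n", n, pad4(n));
	return 0;
}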
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000000..0b9bdb1fb807
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,1604 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not freed while
53 * processing the callback, the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be freed while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
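/*
 * Illustrative user-space pthread analogue of the lport > disc >
 * rport rule described above: always take the greater lock first and
 * release in reverse order, so two threads can never deadlock by
 * acquiring the same pair of locks in opposite orders.  The mutex
 * names are stand-ins for the real lport/disc/rport mutexes.
 */
#include <pthread.h>

static pthread_mutex_t lport_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t disc_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rport_mx = PTHREAD_MUTEX_INITIALIZER;

static void touch_all_objects(void)
{
	pthread_mutex_lock(&lport_mx);	/* greatest lock first */
	pthread_mutex_lock(&disc_mx);
	pthread_mutex_lock(&rport_mx);	/* least lock last */

	/* ... work on lport, disc and rport state ... */

	pthread_mutex_unlock(&rport_mx);	/* release in reverse order */
	pthread_mutex_unlock(&disc_mx);
	pthread_mutex_unlock(&lport_mx);
}

int main(void)
{
	touch_all_objects();
	return 0;
}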
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks, while
77 * the state machine transitions between states (i.e. the _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
98/* Fabric IDs to use for point-to-point mode, chosen on whims. */
99#define FC_LOCAL_PTP_FID_LO 0x010101
100#define FC_LOCAL_PTP_FID_HI 0x010102
101
102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
103
104static int fc_lport_debug;
105
106#define FC_DEBUG_LPORT(fmt...) \
107 do { \
108 if (fc_lport_debug) \
109 FC_DBG(fmt); \
110 } while (0)
111
112static void fc_lport_error(struct fc_lport *, struct fc_frame *);
113
114static void fc_lport_enter_reset(struct fc_lport *);
115static void fc_lport_enter_flogi(struct fc_lport *);
116static void fc_lport_enter_dns(struct fc_lport *);
117static void fc_lport_enter_rpn_id(struct fc_lport *);
118static void fc_lport_enter_rft_id(struct fc_lport *);
119static void fc_lport_enter_scr(struct fc_lport *);
120static void fc_lport_enter_ready(struct fc_lport *);
121static void fc_lport_enter_logo(struct fc_lport *);
122
123static const char *fc_lport_state_names[] = {
124 [LPORT_ST_NONE] = "none",
125 [LPORT_ST_FLOGI] = "FLOGI",
126 [LPORT_ST_DNS] = "dNS",
127 [LPORT_ST_RPN_ID] = "RPN_ID",
128 [LPORT_ST_RFT_ID] = "RFT_ID",
129 [LPORT_ST_SCR] = "SCR",
130 [LPORT_ST_READY] = "Ready",
131 [LPORT_ST_LOGO] = "LOGO",
132 [LPORT_ST_RESET] = "reset",
133};
134
135static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
136{
137 fc_frame_free(fp);
138 return 0;
139}
140
141/**
142 * fc_lport_rport_callback - Event handler for rport events
143 * @lport: The lport which is receiving the event
144 * @rport: The rport which the event has occurred on
145 * @event: The event that occurred
146 *
147 * Locking Note: The rport lock should not be held when calling
148 * this function.
149 */
150static void fc_lport_rport_callback(struct fc_lport *lport,
151 struct fc_rport *rport,
152 enum fc_rport_event event)
153{
154 FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
155 rport->port_id);
156
157 switch (event) {
158 case RPORT_EV_CREATED:
159 if (rport->port_id == FC_FID_DIR_SERV) {
160 mutex_lock(&lport->lp_mutex);
161 if (lport->state == LPORT_ST_DNS) {
162 lport->dns_rp = rport;
163 fc_lport_enter_rpn_id(lport);
164 } else {
165 FC_DEBUG_LPORT("Received a CREATED event on "
166 "port (%6x) for the directory "
167 "server, but the lport is not "
168 "in the DNS state, it's in the "
169 "%d state", rport->port_id,
170 lport->state);
171 lport->tt.rport_logoff(rport);
172 }
173 mutex_unlock(&lport->lp_mutex);
174 } else
175 FC_DEBUG_LPORT("Received an event for port (%6x) "
176 "which is not the directory server\n",
177 rport->port_id);
178 break;
179 case RPORT_EV_LOGO:
180 case RPORT_EV_FAILED:
181 case RPORT_EV_STOP:
182 if (rport->port_id == FC_FID_DIR_SERV) {
183 mutex_lock(&lport->lp_mutex);
184 lport->dns_rp = NULL;
185 mutex_unlock(&lport->lp_mutex);
186
187 } else
188 FC_DEBUG_LPORT("Received an event for port (%6x) "
189 "which is not the directory server\n",
190 rport->port_id);
191 break;
192 case RPORT_EV_NONE:
193 break;
194 }
195}
196
197/**
198 * fc_lport_state - Return a string which represents the lport's state
199 * @lport: The lport whose state is to be converted to a string
200 */
201static const char *fc_lport_state(struct fc_lport *lport)
202{
203 const char *cp;
204
205 cp = fc_lport_state_names[lport->state];
206 if (!cp)
207 cp = "unknown";
208 return cp;
209}
210
211/**
212 * fc_lport_ptp_setup - Create an rport for point-to-point mode
213 * @lport: The lport to attach the ptp rport to
214 * @fid: The FID of the ptp rport
215 * @remote_wwpn: The WWPN of the ptp rport
216 * @remote_wwnn: The WWNN of the ptp rport
217 */
218static void fc_lport_ptp_setup(struct fc_lport *lport,
219 u32 remote_fid, u64 remote_wwpn,
220 u64 remote_wwnn)
221{
222 struct fc_disc_port dp;
223
224 dp.lp = lport;
225 dp.ids.port_id = remote_fid;
226 dp.ids.port_name = remote_wwpn;
227 dp.ids.node_name = remote_wwnn;
228 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
229
230 if (lport->ptp_rp) {
231 lport->tt.rport_logoff(lport->ptp_rp);
232 lport->ptp_rp = NULL;
233 }
234
235 lport->ptp_rp = fc_rport_rogue_create(&dp);
236
237 lport->tt.rport_login(lport->ptp_rp);
238
239 fc_lport_enter_ready(lport);
240}
241
242void fc_get_host_port_type(struct Scsi_Host *shost)
243{
244 /* TODO - currently just NPORT */
245 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
246}
247EXPORT_SYMBOL(fc_get_host_port_type);
248
249void fc_get_host_port_state(struct Scsi_Host *shost)
250{
251 struct fc_lport *lp = shost_priv(shost);
252
253 if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
254 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
255 else
256 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
257}
258EXPORT_SYMBOL(fc_get_host_port_state);
259
260void fc_get_host_speed(struct Scsi_Host *shost)
261{
262 struct fc_lport *lport = shost_priv(shost);
263
264 fc_host_speed(shost) = lport->link_speed;
265}
266EXPORT_SYMBOL(fc_get_host_speed);
267
268struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
269{
270 int i;
271 struct fc_host_statistics *fcoe_stats;
272 struct fc_lport *lp = shost_priv(shost);
273 struct timespec v0, v1;
274
275 fcoe_stats = &lp->host_stats;
276 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
277
278 jiffies_to_timespec(jiffies, &v0);
279 jiffies_to_timespec(lp->boot_time, &v1);
280 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
281
282 for_each_online_cpu(i) {
283 struct fcoe_dev_stats *stats = lp->dev_stats[i];
284 if (stats == NULL)
285 continue;
286 fcoe_stats->tx_frames += stats->TxFrames;
287 fcoe_stats->tx_words += stats->TxWords;
288 fcoe_stats->rx_frames += stats->RxFrames;
289 fcoe_stats->rx_words += stats->RxWords;
290 fcoe_stats->error_frames += stats->ErrorFrames;
291 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
292 fcoe_stats->fcp_input_requests += stats->InputRequests;
293 fcoe_stats->fcp_output_requests += stats->OutputRequests;
294 fcoe_stats->fcp_control_requests += stats->ControlRequests;
295 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
296 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
297 fcoe_stats->link_failure_count += stats->LinkFailureCount;
298 }
299 fcoe_stats->lip_count = -1;
300 fcoe_stats->nos_count = -1;
301 fcoe_stats->loss_of_sync_count = -1;
302 fcoe_stats->loss_of_signal_count = -1;
303 fcoe_stats->prim_seq_protocol_err_count = -1;
304 fcoe_stats->dumped_frames = -1;
305 return fcoe_stats;
306}
307EXPORT_SYMBOL(fc_get_host_stats);
308
309/*
310 * Fill in FLOGI command for request.
311 */
312static void
313fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
314 unsigned int op)
315{
316 struct fc_els_csp *sp;
317 struct fc_els_cssp *cp;
318
319 memset(flogi, 0, sizeof(*flogi));
320 flogi->fl_cmd = (u8) op;
321 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
322 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
323 sp = &flogi->fl_csp;
324 sp->sp_hi_ver = 0x20;
325 sp->sp_lo_ver = 0x20;
326 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
327 sp->sp_bb_data = htons((u16) lport->mfs);
328 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
329 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
330 if (op != ELS_FLOGI) {
331 sp->sp_features = htons(FC_SP_FT_CIRO);
332 sp->sp_tot_seq = htons(255); /* seq. we accept */
333 sp->sp_rel_off = htons(0x1f);
334 sp->sp_e_d_tov = htonl(lport->e_d_tov);
335
336 cp->cp_rdfs = htons((u16) lport->mfs);
337 cp->cp_con_seq = htons(255);
338 cp->cp_open_seq = 1;
339 }
340}
341
342/*
343 * Add a supported FC-4 type.
344 */
345static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
346{
347 __be32 *mp;
348
349 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
350 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
351}
352
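/*
 * Worked example of the bitmap math above (a sketch only): assuming the
 * usual values FC_TYPE_FCP = 0x08, FC_TYPE_CT = 0x20 and FC_NS_BPW = 32
 * bits per word, fc_lport_add_fc4_type(lport, FC_TYPE_FCP) sets bit 8
 * of ff_type_map[0], while FC_TYPE_CT lands in bit 0 of ff_type_map[1].
 * The ntohl/htonl round trip keeps the map in network byte order.
 */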
353/**
354 * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
355 * @lport: Fibre Channel local port receiving the RLIR
356 * @sp: current sequence in the RLIR exchange
357 * @fp: RLIR request frame
358 *
359 * Locking Note: The lport lock is expected to be held before calling
360 * this function.
361 */
362static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
363 struct fc_lport *lport)
364{
365 FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
366 fc_lport_state(lport));
367
368 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
369 fc_frame_free(fp);
370}
371
372/**
373 * fc_lport_recv_echo_req - Handle received ECHO request
374 * @lport: Fibre Channel local port receiving the ECHO
375 * @sp: current sequence in the ECHO exchange
376 * @fp: ECHO request frame
377 *
378 * Locking Note: The lport lock is expected to be held before calling
379 * this function.
380 */
381static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
382 struct fc_lport *lport)
383{
384 struct fc_frame *fp;
385 struct fc_exch *ep = fc_seq_exch(sp);
386 unsigned int len;
387 void *pp;
388 void *dp;
389 u32 f_ctl;
390
391	FC_DEBUG_LPORT("Received ECHO request while in state %s\n",
392 fc_lport_state(lport));
393
394 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
395 pp = fc_frame_payload_get(in_fp, len);
396
397 if (len < sizeof(__be32))
398 len = sizeof(__be32);
399
400 fp = fc_frame_alloc(lport, len);
401 if (fp) {
402 dp = fc_frame_payload_get(fp, len);
403 memcpy(dp, pp, len);
404 *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
405 sp = lport->tt.seq_start_next(sp);
406 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
407 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
408 FC_TYPE_ELS, f_ctl, 0);
409 lport->tt.seq_send(lport, sp, fp);
410 }
411 fc_frame_free(in_fp);
412}
413
414/**
415 * fc_lport_recv_rnid_req - Handle received Request Node ID data request
416 * @lport: Fibre Channel local port receiving the RNID
417 * @sp: current sequence in the RNID exchange
418 * @fp: RNID request frame
419 *
420 * Locking Note: The lport lock is expected to be held before calling
421 * this function.
422 */
423static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
424 struct fc_lport *lport)
425{
426 struct fc_frame *fp;
427 struct fc_exch *ep = fc_seq_exch(sp);
428 struct fc_els_rnid *req;
429 struct {
430 struct fc_els_rnid_resp rnid;
431 struct fc_els_rnid_cid cid;
432 struct fc_els_rnid_gen gen;
433 } *rp;
434 struct fc_seq_els_data rjt_data;
435 u8 fmt;
436 size_t len;
437 u32 f_ctl;
438
439 FC_DEBUG_LPORT("Received RNID request while in state %s\n",
440 fc_lport_state(lport));
441
442 req = fc_frame_payload_get(in_fp, sizeof(*req));
443 if (!req) {
444 rjt_data.fp = NULL;
445 rjt_data.reason = ELS_RJT_LOGIC;
446 rjt_data.explan = ELS_EXPL_NONE;
447 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
448 } else {
449 fmt = req->rnid_fmt;
450 len = sizeof(*rp);
451 if (fmt != ELS_RNIDF_GEN ||
452 ntohl(lport->rnid_gen.rnid_atype) == 0) {
453 fmt = ELS_RNIDF_NONE; /* nothing to provide */
454 len -= sizeof(rp->gen);
455 }
456 fp = fc_frame_alloc(lport, len);
457 if (fp) {
458 rp = fc_frame_payload_get(fp, len);
459 memset(rp, 0, len);
460 rp->rnid.rnid_cmd = ELS_LS_ACC;
461 rp->rnid.rnid_fmt = fmt;
462 rp->rnid.rnid_cid_len = sizeof(rp->cid);
463 rp->cid.rnid_wwpn = htonll(lport->wwpn);
464 rp->cid.rnid_wwnn = htonll(lport->wwnn);
465 if (fmt == ELS_RNIDF_GEN) {
466 rp->rnid.rnid_sid_len = sizeof(rp->gen);
467 memcpy(&rp->gen, &lport->rnid_gen,
468 sizeof(rp->gen));
469 }
470 sp = lport->tt.seq_start_next(sp);
471 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
472 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
473 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
474 FC_TYPE_ELS, f_ctl, 0);
475 lport->tt.seq_send(lport, sp, fp);
476 }
477 }
478 fc_frame_free(in_fp);
479}
480
481/**
482 * fc_lport_recv_adisc_req - Handle received Address Discovery Request
483 * @lport: Fibre Channel local port receiving the ADISC
484 * @sp: current sequence in the ADISC exchange
485 * @fp: ADISC request frame
486 *
487 * Locking Note: The lport lock is expected to be held before calling
488 * this function.
489 */
490static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
491 struct fc_lport *lport)
492{
493 struct fc_frame *fp;
494 struct fc_exch *ep = fc_seq_exch(sp);
495 struct fc_els_adisc *req, *rp;
496 struct fc_seq_els_data rjt_data;
497 size_t len;
498 u32 f_ctl;
499
500 FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
501 fc_lport_state(lport));
502
503 req = fc_frame_payload_get(in_fp, sizeof(*req));
504 if (!req) {
505 rjt_data.fp = NULL;
506 rjt_data.reason = ELS_RJT_LOGIC;
507 rjt_data.explan = ELS_EXPL_NONE;
508 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
509 } else {
510 len = sizeof(*rp);
511 fp = fc_frame_alloc(lport, len);
512 if (fp) {
513 rp = fc_frame_payload_get(fp, len);
514 memset(rp, 0, len);
515 rp->adisc_cmd = ELS_LS_ACC;
516 rp->adisc_wwpn = htonll(lport->wwpn);
517 rp->adisc_wwnn = htonll(lport->wwnn);
518 hton24(rp->adisc_port_id,
519 fc_host_port_id(lport->host));
520 sp = lport->tt.seq_start_next(sp);
521 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
522 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
523 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
524 FC_TYPE_ELS, f_ctl, 0);
525 lport->tt.seq_send(lport, sp, fp);
526 }
527 }
528 fc_frame_free(in_fp);
529}
530
531/**
532 * fc_lport_recv_logo_req - Handle received fabric LOGO request
533 * @lport: Fibre Channel local port receiving the LOGO
534 * @sp: current sequence in the LOGO exchange
535 * @fp: LOGO request frame
536 *
537 * Locking Note: The lport lock is expected to be held before calling
538 * this function.
539 */
540static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
541 struct fc_lport *lport)
542{
543 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
544 fc_lport_enter_reset(lport);
545 fc_frame_free(fp);
546}
547
548/**
549 * fc_fabric_login - Start the lport state machine
550 * @lport: The lport that should log into the fabric
551 *
552 * Locking Note: This function should not be called
553 * with the lport lock held.
554 */
555int fc_fabric_login(struct fc_lport *lport)
556{
557 int rc = -1;
558
559 mutex_lock(&lport->lp_mutex);
560 if (lport->state == LPORT_ST_NONE) {
561 fc_lport_enter_reset(lport);
562 rc = 0;
563 }
564 mutex_unlock(&lport->lp_mutex);
565
566 return rc;
567}
568EXPORT_SYMBOL(fc_fabric_login);
569
570/**
571 * fc_linkup - Handler for transport linkup events
572 * @lport: The lport whose link is up
573 */
574void fc_linkup(struct fc_lport *lport)
575{
576 FC_DEBUG_LPORT("Link is up for port (%6x)\n",
577 fc_host_port_id(lport->host));
578
579 mutex_lock(&lport->lp_mutex);
580 if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
581 lport->link_status |= FC_LINK_UP;
582
583 if (lport->state == LPORT_ST_RESET)
584 fc_lport_enter_flogi(lport);
585 }
586 mutex_unlock(&lport->lp_mutex);
587}
588EXPORT_SYMBOL(fc_linkup);
589
590/**
591 * fc_linkdown - Handler for transport linkdown events
592 * @lport: The lport whose link is down
593 */
594void fc_linkdown(struct fc_lport *lport)
595{
596 mutex_lock(&lport->lp_mutex);
597 FC_DEBUG_LPORT("Link is down for port (%6x)\n",
598 fc_host_port_id(lport->host));
599
600 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
601 lport->link_status &= ~(FC_LINK_UP);
602 fc_lport_enter_reset(lport);
603 lport->tt.fcp_cleanup(lport);
604 }
605 mutex_unlock(&lport->lp_mutex);
606}
607EXPORT_SYMBOL(fc_linkdown);
608
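/*
 * A minimal sketch of how a low-level driver might feed link events
 * into the two handlers above; the handler name and the link_ok flag
 * are hypothetical, only fc_linkup() and fc_linkdown() come from this
 * file (fcoe drives them from its netdev event handling in much the
 * same way).
 */
static void example_lld_link_event(struct fc_lport *lport, int link_ok)
{
	if (link_ok)
		fc_linkup(lport);	/* sets FC_LINK_UP; starts FLOGI if in RESET */
	else
		fc_linkdown(lport);	/* clears FC_LINK_UP, resets and cleans up FCP */
}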
609/**
610 * fc_pause - Pause the flow of frames
611 * @lport: The lport to be paused
612 */
613void fc_pause(struct fc_lport *lport)
614{
615 mutex_lock(&lport->lp_mutex);
616 lport->link_status |= FC_PAUSE;
617 mutex_unlock(&lport->lp_mutex);
618}
619EXPORT_SYMBOL(fc_pause);
620
621/**
622 * fc_unpause - Unpause the flow of frames
623 * @lport: The lport to be unpaused
624 */
625void fc_unpause(struct fc_lport *lport)
626{
627 mutex_lock(&lport->lp_mutex);
628 lport->link_status &= ~(FC_PAUSE);
629 mutex_unlock(&lport->lp_mutex);
630}
631EXPORT_SYMBOL(fc_unpause);
632
633/**
634 * fc_fabric_logoff - Logout of the fabric
635 * @lport: fc_lport pointer to logoff the fabric
636 *
637 * Return value:
638 * 0 for success, -1 for failure
639 **/
640int fc_fabric_logoff(struct fc_lport *lport)
641{
642 lport->tt.disc_stop_final(lport);
643 mutex_lock(&lport->lp_mutex);
644 fc_lport_enter_logo(lport);
645 mutex_unlock(&lport->lp_mutex);
646 return 0;
647}
648EXPORT_SYMBOL(fc_fabric_logoff);
649
650/**
651 * fc_lport_destroy - unregister a fc_lport
652 * @lport: fc_lport pointer to unregister
653 *
654 * Return value:
655 * 0 for success
656 * Note:
657 * exit routine for fc_lport instance
658 * clean-up all the allocated memory
659 * and free up other system resources.
660 *
661 **/
662int fc_lport_destroy(struct fc_lport *lport)
663{
664 lport->tt.frame_send = fc_frame_drop;
665 lport->tt.fcp_abort_io(lport);
666 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
667 return 0;
668}
669EXPORT_SYMBOL(fc_lport_destroy);
670
671/**
672 * fc_set_mfs - sets up the mfs for the corresponding fc_lport
673 * @lport: fc_lport pointer to update
674 * @mfs: the new mfs for fc_lport
675 *
676 * Set mfs for the given fc_lport to the new mfs.
677 *
678 * Return: 0 for success
679 *
680 **/
681int fc_set_mfs(struct fc_lport *lport, u32 mfs)
682{
683 unsigned int old_mfs;
684 int rc = -EINVAL;
685
686 mutex_lock(&lport->lp_mutex);
687
688 old_mfs = lport->mfs;
689
690 if (mfs >= FC_MIN_MAX_FRAME) {
691 mfs &= ~3;
692 if (mfs > FC_MAX_FRAME)
693 mfs = FC_MAX_FRAME;
694 mfs -= sizeof(struct fc_frame_header);
695 lport->mfs = mfs;
696 rc = 0;
697 }
698
699 if (!rc && mfs < old_mfs)
700 fc_lport_enter_reset(lport);
701
702 mutex_unlock(&lport->lp_mutex);
703
704 return rc;
705}
706EXPORT_SYMBOL(fc_set_mfs);
707
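/*
 * Worked example for fc_set_mfs(), a sketch assuming the usual
 * fc_frame.h values (FC_MAX_PAYLOAD = 2112 and a 24-byte frame header,
 * so FC_MAX_FRAME = 2136):
 *
 *	fc_set_mfs(lport, 2136);
 *		2136 & ~3			 -> 2136 (round down to a word multiple)
 *		min(2136, FC_MAX_FRAME)		 -> 2136
 *		- sizeof(struct fc_frame_header) -> lport->mfs = 2112
 *
 * Note the argument is a full frame size while lport->mfs ends up
 * holding the maximum payload; shrinking mfs forces a reset so the
 * smaller size is renegotiated at FLOGI.
 */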
708/**
709 * fc_lport_disc_callback - Callback for discovery events
710 * @lport: FC local port
711 * @event: The discovery event
712 */
713void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
714{
715 switch (event) {
716 case DISC_EV_SUCCESS:
717 FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
718 fc_host_port_id(lport->host));
719 break;
720 case DISC_EV_FAILED:
721 FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
722 fc_host_port_id(lport->host));
723 mutex_lock(&lport->lp_mutex);
724 fc_lport_enter_reset(lport);
725 mutex_unlock(&lport->lp_mutex);
726 break;
727 case DISC_EV_NONE:
728 WARN_ON(1);
729 break;
730 }
731}
732
733/**
734 * fc_lport_enter_ready - Enter the ready state and start discovery
735 * @lport: Fibre Channel local port that is ready
736 *
737 * Locking Note: The lport lock is expected to be held before calling
738 * this routine.
739 */
740static void fc_lport_enter_ready(struct fc_lport *lport)
741{
742 FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
743 fc_host_port_id(lport->host), fc_lport_state(lport));
744
745 fc_lport_state_enter(lport, LPORT_ST_READY);
746
747 lport->tt.disc_start(fc_lport_disc_callback, lport);
748}
749
750/**
751 * fc_lport_recv_flogi_req - Receive a FLOGI request
752 * @sp_in: The sequence the FLOGI is on
753 * @rx_fp: The frame the FLOGI is in
754 * @lport: The lport that received the request
755 *
756 * A received FLOGI request indicates a point-to-point connection.
757 * Accept it with the common service parameters indicating our N port.
758 * Set up to do a PLOGI if we have the higher-number WWPN.
759 *
760 * Locking Note: The lport lock is expected to be held before calling
761 * this function.
762 */
763static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
764 struct fc_frame *rx_fp,
765 struct fc_lport *lport)
766{
767 struct fc_frame *fp;
768 struct fc_frame_header *fh;
769 struct fc_seq *sp;
770 struct fc_exch *ep;
771 struct fc_els_flogi *flp;
772 struct fc_els_flogi *new_flp;
773 u64 remote_wwpn;
774 u32 remote_fid;
775 u32 local_fid;
776 u32 f_ctl;
777
778 FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
779 fc_lport_state(lport));
780
781 fh = fc_frame_header_get(rx_fp);
782 remote_fid = ntoh24(fh->fh_s_id);
783 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
784 if (!flp)
785 goto out;
786 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
787 if (remote_wwpn == lport->wwpn) {
788 FC_DBG("FLOGI from port with same WWPN %llx "
789 "possible configuration error\n", remote_wwpn);
790 goto out;
791 }
792 FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
793
794 /*
795 * XXX what is the right thing to do for FIDs?
796 * The originator might expect our S_ID to be 0xfffffe.
797 * But if so, both of us could end up with the same FID.
798 */
799 local_fid = FC_LOCAL_PTP_FID_LO;
800 if (remote_wwpn < lport->wwpn) {
801 local_fid = FC_LOCAL_PTP_FID_HI;
802 if (!remote_fid || remote_fid == local_fid)
803 remote_fid = FC_LOCAL_PTP_FID_LO;
804 } else if (!remote_fid) {
805 remote_fid = FC_LOCAL_PTP_FID_HI;
806 }
807
808 fc_host_port_id(lport->host) = local_fid;
809
810 fp = fc_frame_alloc(lport, sizeof(*flp));
811 if (fp) {
812 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
813 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
814 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
815 new_flp->fl_cmd = (u8) ELS_LS_ACC;
816
817 /*
818 * Send the response. If this fails, the originator should
819 * repeat the sequence.
820 */
821 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
822 ep = fc_seq_exch(sp);
823 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
824 FC_TYPE_ELS, f_ctl, 0);
825 lport->tt.seq_send(lport, sp, fp);
826
827 } else {
828 fc_lport_error(lport, fp);
829 }
830 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
831 get_unaligned_be64(&flp->fl_wwnn));
832
833 lport->tt.disc_start(fc_lport_disc_callback, lport);
834
835out:
836 sp = fr_seq(rx_fp);
837 fc_frame_free(rx_fp);
838}
839
840/**
841 * fc_lport_recv_req - The generic lport request handler
842 * @lport: The lport that received the request
843 * @sp: The sequence the request is on
844 * @fp: The frame the request is in
845 *
846 * This function will see if the lport handles the request or
847 * if an rport should handle the request.
848 *
849 * Locking Note: This function should not be called with the lport
850 * lock held because it will grab the lock.
851 */
852static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
853 struct fc_frame *fp)
854{
855 struct fc_frame_header *fh = fc_frame_header_get(fp);
856 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
857 struct fc_rport *rport;
858 u32 s_id;
859 u32 d_id;
860 struct fc_seq_els_data rjt_data;
861
862 mutex_lock(&lport->lp_mutex);
863
864 /*
865 * Handle special ELS cases like FLOGI, LOGO, and
866 * RSCN here. These don't require a session.
867 * Even if we had a session, it might not be ready.
868 */
869 if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
870 /*
871 * Check opcode.
872 */
873 recv = NULL;
874 switch (fc_frame_payload_op(fp)) {
875 case ELS_FLOGI:
876 recv = fc_lport_recv_flogi_req;
877 break;
878 case ELS_LOGO:
879 fh = fc_frame_header_get(fp);
880 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
881 recv = fc_lport_recv_logo_req;
882 break;
883 case ELS_RSCN:
884 recv = lport->tt.disc_recv_req;
885 break;
886 case ELS_ECHO:
887 recv = fc_lport_recv_echo_req;
888 break;
889 case ELS_RLIR:
890 recv = fc_lport_recv_rlir_req;
891 break;
892 case ELS_RNID:
893 recv = fc_lport_recv_rnid_req;
894 break;
895 case ELS_ADISC:
896 recv = fc_lport_recv_adisc_req;
897 break;
898 }
899
900 if (recv)
901 recv(sp, fp, lport);
902 else {
903 /*
904 * Find session.
905 * If this is a new incoming PLOGI, we won't find it.
906 */
907 s_id = ntoh24(fh->fh_s_id);
908 d_id = ntoh24(fh->fh_d_id);
909
910 rport = lport->tt.rport_lookup(lport, s_id);
911 if (rport)
912 lport->tt.rport_recv_req(sp, fp, rport);
913 else {
914 rjt_data.fp = NULL;
915 rjt_data.reason = ELS_RJT_UNAB;
916 rjt_data.explan = ELS_EXPL_NONE;
917 lport->tt.seq_els_rsp_send(sp,
918 ELS_LS_RJT,
919 &rjt_data);
920 fc_frame_free(fp);
921 }
922 }
923 } else {
924 FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
925 fc_frame_free(fp);
926 }
927 mutex_unlock(&lport->lp_mutex);
928
929 /*
930	 * The common exch_done for all requests may not be good
931	 * if any request requires a longer hold on the exchange. XXX
932 */
933 lport->tt.exch_done(sp);
934}
935
936/**
937 * fc_lport_reset - Reset an lport
938 * @lport: The lport which should be reset
939 *
940 * Locking Note: This function should not be called with the
941 * lport lock held.
942 */
943int fc_lport_reset(struct fc_lport *lport)
944{
945 mutex_lock(&lport->lp_mutex);
946 fc_lport_enter_reset(lport);
947 mutex_unlock(&lport->lp_mutex);
948 return 0;
949}
950EXPORT_SYMBOL(fc_lport_reset);
951
952/**
953 * fc_lport_enter_reset - Reset the local port
954 * @lport: Fibre Channel local port to be reset
955 *
956 * Locking Note: The lport lock is expected to be held before calling
957 * this routine.
958 */
959static void fc_lport_enter_reset(struct fc_lport *lport)
960{
961 FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
962 fc_host_port_id(lport->host), fc_lport_state(lport));
963
964 fc_lport_state_enter(lport, LPORT_ST_RESET);
965
966 if (lport->dns_rp)
967 lport->tt.rport_logoff(lport->dns_rp);
968
969 if (lport->ptp_rp) {
970 lport->tt.rport_logoff(lport->ptp_rp);
971 lport->ptp_rp = NULL;
972 }
973
974 lport->tt.disc_stop(lport);
975
976 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
977 fc_host_fabric_name(lport->host) = 0;
978 fc_host_port_id(lport->host) = 0;
979
980 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
981 fc_lport_enter_flogi(lport);
982}
983
984/**
985 * fc_lport_error - Handler for any errors
986 * @lport: The fc_lport object
987 * @fp: The frame pointer
988 *
989 * If the error was caused by a resource allocation failure
990 * then wait for half a second and retry, otherwise retry
991 * after the e_d_tov time.
992 */
993static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
994{
995 unsigned long delay = 0;
996 FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
997 PTR_ERR(fp), fc_lport_state(lport),
998 lport->retry_count);
999
1000 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1001 /*
1002 * Memory allocation failure, or the exchange timed out.
1003 * Retry after delay
1004 */
1005 if (lport->retry_count < lport->max_retry_count) {
1006 lport->retry_count++;
1007 if (!fp)
1008 delay = msecs_to_jiffies(500);
1009 else
1010 delay = msecs_to_jiffies(lport->e_d_tov);
1011
1012 schedule_delayed_work(&lport->retry_work, delay);
1013 } else {
1014 switch (lport->state) {
1015 case LPORT_ST_NONE:
1016 case LPORT_ST_READY:
1017 case LPORT_ST_RESET:
1018 case LPORT_ST_RPN_ID:
1019 case LPORT_ST_RFT_ID:
1020 case LPORT_ST_SCR:
1021 case LPORT_ST_DNS:
1022 case LPORT_ST_FLOGI:
1023 case LPORT_ST_LOGO:
1024 fc_lport_enter_reset(lport);
1025 break;
1026 }
1027 }
1028 }
1029}
1030
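/*
 * Worked example of the retry policy above, with assumed values
 * max_retry_count = 3 and e_d_tov = 2000 ms: a timed-out FLOGI is
 * re-driven through retry_work after 2 s (500 ms when fp is NULL,
 * i.e. a frame allocation failure); the fourth consecutive failure
 * exhausts the retries and drops the port into the RESET state.
 */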
1031/**
1032 * fc_lport_rft_id_resp - Handle response to Register Fibre
1033 * Channel Types by ID (RFT_ID) request
1034 * @sp: current sequence in RFT_ID exchange
1035 * @fp: response frame
1036 * @lp_arg: Fibre Channel host port instance
1037 *
1038 * Locking Note: This function will be called without the lport lock
1039 * held, but it will lock, call an _enter_* function or fc_lport_error
1040 * and then unlock the lport.
1041 */
1042static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1043 void *lp_arg)
1044{
1045 struct fc_lport *lport = lp_arg;
1046 struct fc_frame_header *fh;
1047 struct fc_ct_hdr *ct;
1048
1049 if (fp == ERR_PTR(-FC_EX_CLOSED))
1050 return;
1051
1052 mutex_lock(&lport->lp_mutex);
1053
1054 FC_DEBUG_LPORT("Received a RFT_ID response\n");
1055
1056 if (lport->state != LPORT_ST_RFT_ID) {
1057 FC_DBG("Received a RFT_ID response, but in state %s\n",
1058 fc_lport_state(lport));
1059 goto out;
1060 }
1061
1062 if (IS_ERR(fp)) {
1063 fc_lport_error(lport, fp);
1064 goto err;
1065 }
1066
1067 fh = fc_frame_header_get(fp);
1068 ct = fc_frame_payload_get(fp, sizeof(*ct));
1069
1070 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1071 ct->ct_fs_type == FC_FST_DIR &&
1072 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1073 ntohs(ct->ct_cmd) == FC_FS_ACC)
1074 fc_lport_enter_scr(lport);
1075 else
1076 fc_lport_error(lport, fp);
1077out:
1078 fc_frame_free(fp);
1079err:
1080 mutex_unlock(&lport->lp_mutex);
1081}
1082
1083/**
1084 * fc_lport_rpn_id_resp - Handle response to Register Port
1085 * Name by ID (RPN_ID) request
1086 * @sp: current sequence in RPN_ID exchange
1087 * @fp: response frame
1088 * @lp_arg: Fibre Channel host port instance
1089 *
1090 * Locking Note: This function will be called without the lport lock
1091 * held, but it will lock, call an _enter_* function or fc_lport_error
1092 * and then unlock the lport.
1093 */
1094static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1095 void *lp_arg)
1096{
1097 struct fc_lport *lport = lp_arg;
1098 struct fc_frame_header *fh;
1099 struct fc_ct_hdr *ct;
1100
1101 if (fp == ERR_PTR(-FC_EX_CLOSED))
1102 return;
1103
1104 mutex_lock(&lport->lp_mutex);
1105
1106 FC_DEBUG_LPORT("Received a RPN_ID response\n");
1107
1108 if (lport->state != LPORT_ST_RPN_ID) {
1109 FC_DBG("Received a RPN_ID response, but in state %s\n",
1110 fc_lport_state(lport));
1111 goto out;
1112 }
1113
1114 if (IS_ERR(fp)) {
1115 fc_lport_error(lport, fp);
1116 goto err;
1117 }
1118
1119 fh = fc_frame_header_get(fp);
1120 ct = fc_frame_payload_get(fp, sizeof(*ct));
1121 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1122 ct->ct_fs_type == FC_FST_DIR &&
1123 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1124 ntohs(ct->ct_cmd) == FC_FS_ACC)
1125 fc_lport_enter_rft_id(lport);
1126 else
1127 fc_lport_error(lport, fp);
1128
1129out:
1130 fc_frame_free(fp);
1131err:
1132 mutex_unlock(&lport->lp_mutex);
1133}
1134
1135/**
1136 * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
1137 * @sp: current sequence in SCR exchange
1138 * @fp: response frame
1139 * @lp_arg: Fibre Channel local port instance that sent the registration request
1140 *
1141 * Locking Note: This function will be called without the lport lock
1142 * held, but it will lock, call an _enter_* function or fc_lport_error
1143 * and then unlock the lport.
1144 */
1145static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1146 void *lp_arg)
1147{
1148 struct fc_lport *lport = lp_arg;
1149 u8 op;
1150
1151 if (fp == ERR_PTR(-FC_EX_CLOSED))
1152 return;
1153
1154 mutex_lock(&lport->lp_mutex);
1155
1156 FC_DEBUG_LPORT("Received a SCR response\n");
1157
1158 if (lport->state != LPORT_ST_SCR) {
1159 FC_DBG("Received a SCR response, but in state %s\n",
1160 fc_lport_state(lport));
1161 goto out;
1162 }
1163
1164 if (IS_ERR(fp)) {
1165 fc_lport_error(lport, fp);
1166 goto err;
1167 }
1168
1169 op = fc_frame_payload_op(fp);
1170 if (op == ELS_LS_ACC)
1171 fc_lport_enter_ready(lport);
1172 else
1173 fc_lport_error(lport, fp);
1174
1175out:
1176 fc_frame_free(fp);
1177err:
1178 mutex_unlock(&lport->lp_mutex);
1179}
1180
1181/**
1182 * fc_lport_enter_scr - Send a State Change Register (SCR) request
1183 * @lport: Fibre Channel local port to register for state changes
1184 *
1185 * Locking Note: The lport lock is expected to be held before calling
1186 * this routine.
1187 */
1188static void fc_lport_enter_scr(struct fc_lport *lport)
1189{
1190 struct fc_frame *fp;
1191
1192 FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
1193 fc_host_port_id(lport->host), fc_lport_state(lport));
1194
1195 fc_lport_state_enter(lport, LPORT_ST_SCR);
1196
1197 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1198 if (!fp) {
1199 fc_lport_error(lport, fp);
1200 return;
1201 }
1202
1203 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
1204 fc_lport_scr_resp, lport, lport->e_d_tov))
1205 fc_lport_error(lport, fp);
1206}
1207
1208/**
1209 * fc_lport_enter_rft_id - Register FC4-types with the name server
1210 * @lport: Fibre Channel local port to register
1211 *
1212 * Locking Note: The lport lock is expected to be held before calling
1213 * this routine.
1214 */
1215static void fc_lport_enter_rft_id(struct fc_lport *lport)
1216{
1217 struct fc_frame *fp;
1218 struct fc_ns_fts *lps;
1219 int i;
1220
1221 FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
1222 fc_host_port_id(lport->host), fc_lport_state(lport));
1223
1224 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1225
1226 lps = &lport->fcts;
1227 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1228 while (--i >= 0)
1229 if (ntohl(lps->ff_type_map[i]) != 0)
1230 break;
1231 if (i < 0) {
1232 /* nothing to register, move on to SCR */
1233 fc_lport_enter_scr(lport);
1234 return;
1235 }
1236
1237 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1238 sizeof(struct fc_ns_rft));
1239 if (!fp) {
1240 fc_lport_error(lport, fp);
1241 return;
1242 }
1243
1244 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
1245 fc_lport_rft_id_resp,
1246 lport, lport->e_d_tov))
1247 fc_lport_error(lport, fp);
1248}
1249
1250/**
1251 * fc_lport_enter_rpn_id - Register port name with the name server
1252 * @lport: Fibre Channel local port to register
1253 *
1254 * Locking Note: The lport lock is expected to be held before calling
1255 * this routine.
1256 */
1257static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1258{
1259 struct fc_frame *fp;
1260
1261 FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
1262 fc_host_port_id(lport->host), fc_lport_state(lport));
1263
1264 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1265
1266 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1267 sizeof(struct fc_ns_rn_id));
1268 if (!fp) {
1269 fc_lport_error(lport, fp);
1270 return;
1271 }
1272
1273 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
1274 fc_lport_rpn_id_resp,
1275 lport, lport->e_d_tov))
1276 fc_lport_error(lport, fp);
1277}
1278
1279static struct fc_rport_operations fc_lport_rport_ops = {
1280 .event_callback = fc_lport_rport_callback,
1281};
1282
1283/**
1284 * fc_lport_enter_dns - Create a rport to the name server
1285 * @lport: Fibre Channel local port requesting a rport for the name server
1286 *
1287 * Locking Note: The lport lock is expected to be held before calling
1288 * this routine.
1289 */
1290static void fc_lport_enter_dns(struct fc_lport *lport)
1291{
1292 struct fc_rport *rport;
1293 struct fc_rport_libfc_priv *rdata;
1294 struct fc_disc_port dp;
1295
1296 dp.ids.port_id = FC_FID_DIR_SERV;
1297 dp.ids.port_name = -1;
1298 dp.ids.node_name = -1;
1299 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
1300 dp.lp = lport;
1301
1302 FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
1303 fc_host_port_id(lport->host), fc_lport_state(lport));
1304
1305 fc_lport_state_enter(lport, LPORT_ST_DNS);
1306
1307 rport = fc_rport_rogue_create(&dp);
1308 if (!rport)
1309 goto err;
1310
1311 rdata = rport->dd_data;
1312 rdata->ops = &fc_lport_rport_ops;
1313 lport->tt.rport_login(rport);
1314 return;
1315
1316err:
1317 fc_lport_error(lport, NULL);
1318}
1319
1320/**
1321 * fc_lport_timeout - Handler for the retry_work timer.
1322 * @work: The work struct of the fc_lport
1323 */
1324static void fc_lport_timeout(struct work_struct *work)
1325{
1326 struct fc_lport *lport =
1327 container_of(work, struct fc_lport,
1328 retry_work.work);
1329
1330 mutex_lock(&lport->lp_mutex);
1331
1332 switch (lport->state) {
1333 case LPORT_ST_NONE:
1334 case LPORT_ST_READY:
1335 case LPORT_ST_RESET:
1336 WARN_ON(1);
1337 break;
1338 case LPORT_ST_FLOGI:
1339 fc_lport_enter_flogi(lport);
1340 break;
1341 case LPORT_ST_DNS:
1342 fc_lport_enter_dns(lport);
1343 break;
1344 case LPORT_ST_RPN_ID:
1345 fc_lport_enter_rpn_id(lport);
1346 break;
1347 case LPORT_ST_RFT_ID:
1348 fc_lport_enter_rft_id(lport);
1349 break;
1350 case LPORT_ST_SCR:
1351 fc_lport_enter_scr(lport);
1352 break;
1353 case LPORT_ST_LOGO:
1354 fc_lport_enter_logo(lport);
1355 break;
1356 }
1357
1358 mutex_unlock(&lport->lp_mutex);
1359}
1360
1361/**
1362 * fc_lport_logo_resp - Handle response to LOGO request
1363 * @sp: current sequence in LOGO exchange
1364 * @fp: response frame
1365 * @lp_arg: Fibre Channel local port instance that sent the LOGO request
1366 *
1367 * Locking Note: This function will be called without the lport lock
1368 * held, but it will lock, call an _enter_* function or fc_lport_error
1369 * and then unlock the lport.
1370 */
1371static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1372 void *lp_arg)
1373{
1374 struct fc_lport *lport = lp_arg;
1375 u8 op;
1376
1377 if (fp == ERR_PTR(-FC_EX_CLOSED))
1378 return;
1379
1380 mutex_lock(&lport->lp_mutex);
1381
1382 FC_DEBUG_LPORT("Received a LOGO response\n");
1383
1384 if (lport->state != LPORT_ST_LOGO) {
1385 FC_DBG("Received a LOGO response, but in state %s\n",
1386 fc_lport_state(lport));
1387 goto out;
1388 }
1389
1390 if (IS_ERR(fp)) {
1391 fc_lport_error(lport, fp);
1392 goto err;
1393 }
1394
1395 op = fc_frame_payload_op(fp);
1396 if (op == ELS_LS_ACC)
1397 fc_lport_enter_reset(lport);
1398 else
1399 fc_lport_error(lport, fp);
1400
1401out:
1402 fc_frame_free(fp);
1403err:
1404 mutex_unlock(&lport->lp_mutex);
1405}
1406
1407/**
1408 * fc_lport_enter_logo - Logout of the fabric
1409 * @lport: Fibre Channel local port to be logged out
1410 *
1411 * Locking Note: The lport lock is expected to be held before calling
1412 * this routine.
1413 */
1414static void fc_lport_enter_logo(struct fc_lport *lport)
1415{
1416 struct fc_frame *fp;
1417 struct fc_els_logo *logo;
1418
1419 FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
1420 fc_host_port_id(lport->host), fc_lport_state(lport));
1421
1422 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1423
1424 /* DNS session should be closed so we can release it here */
1425 if (lport->dns_rp)
1426 lport->tt.rport_logoff(lport->dns_rp);
1427
1428 fp = fc_frame_alloc(lport, sizeof(*logo));
1429 if (!fp) {
1430 fc_lport_error(lport, fp);
1431 return;
1432 }
1433
1434 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
1435 lport, lport->e_d_tov))
1436 fc_lport_error(lport, fp);
1437}
1438
1439/**
1440 * fc_lport_flogi_resp - Handle response to FLOGI request
1441 * @sp: current sequence in FLOGI exchange
1442 * @fp: response frame
1443 * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
1444 *
1445 * Locking Note: This function will be called without the lport lock
1446 * held, but it will lock, call an _enter_* function or fc_lport_error
1447 * and then unlock the lport.
1448 */
1449static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1450 void *lp_arg)
1451{
1452 struct fc_lport *lport = lp_arg;
1453 struct fc_frame_header *fh;
1454 struct fc_els_flogi *flp;
1455 u32 did;
1456 u16 csp_flags;
1457 unsigned int r_a_tov;
1458 unsigned int e_d_tov;
1459 u16 mfs;
1460
1461 if (fp == ERR_PTR(-FC_EX_CLOSED))
1462 return;
1463
1464 mutex_lock(&lport->lp_mutex);
1465
1466 FC_DEBUG_LPORT("Received a FLOGI response\n");
1467
1468 if (lport->state != LPORT_ST_FLOGI) {
1469 FC_DBG("Received a FLOGI response, but in state %s\n",
1470 fc_lport_state(lport));
1471 goto out;
1472 }
1473
1474 if (IS_ERR(fp)) {
1475 fc_lport_error(lport, fp);
1476 goto err;
1477 }
1478
1479 fh = fc_frame_header_get(fp);
1480 did = ntoh24(fh->fh_d_id);
1481 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1482
1483 FC_DEBUG_LPORT("Assigned fid %x\n", did);
1484 fc_host_port_id(lport->host) = did;
1485
1486 flp = fc_frame_payload_get(fp, sizeof(*flp));
1487 if (flp) {
1488 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1489 FC_SP_BB_DATA_MASK;
1490 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1491 mfs < lport->mfs)
1492 lport->mfs = mfs;
1493 csp_flags = ntohs(flp->fl_csp.sp_features);
1494 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1495 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1496 if (csp_flags & FC_SP_FT_EDTR)
1497 e_d_tov /= 1000000;
1498 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1499 if (e_d_tov > lport->e_d_tov)
1500 lport->e_d_tov = e_d_tov;
1501 lport->r_a_tov = 2 * e_d_tov;
1502 FC_DBG("Point-to-Point mode\n");
1503 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1504 get_unaligned_be64(
1505 &flp->fl_wwpn),
1506 get_unaligned_be64(
1507 &flp->fl_wwnn));
1508 } else {
1509 lport->e_d_tov = e_d_tov;
1510 lport->r_a_tov = r_a_tov;
1511 fc_host_fabric_name(lport->host) =
1512 get_unaligned_be64(&flp->fl_wwnn);
1513 fc_lport_enter_dns(lport);
1514 }
1515 }
1516
1517 if (flp) {
1518 csp_flags = ntohs(flp->fl_csp.sp_features);
1519 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1520 lport->tt.disc_start(fc_lport_disc_callback,
1521 lport);
1522 }
1523 }
1524 } else {
1525 FC_DBG("bad FLOGI response\n");
1526 }
1527
1528out:
1529 fc_frame_free(fp);
1530err:
1531 mutex_unlock(&lport->lp_mutex);
1532}
1533
1534/**
1535 * fc_lport_enter_flogi - Send a FLOGI request to the fabric manager
1536 * @lport: Fibre Channel local port to be logged in to the fabric
1537 *
1538 * Locking Note: The lport lock is expected to be held before calling
1539 * this routine.
1540 */
1541void fc_lport_enter_flogi(struct fc_lport *lport)
1542{
1543 struct fc_frame *fp;
1544
1545 FC_DEBUG_LPORT("Processing FLOGI state\n");
1546
1547 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1548
1549 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1550 if (!fp)
1551 return fc_lport_error(lport, fp);
1552
1553 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
1554 fc_lport_flogi_resp, lport, lport->e_d_tov))
1555 fc_lport_error(lport, fp);
1556}
1557
1558/* Configure a fc_lport */
1559int fc_lport_config(struct fc_lport *lport)
1560{
1561 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1562 mutex_init(&lport->lp_mutex);
1563
1564 fc_lport_state_enter(lport, LPORT_ST_NONE);
1565
1566 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1567 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1568
1569 return 0;
1570}
1571EXPORT_SYMBOL(fc_lport_config);
1572
1573int fc_lport_init(struct fc_lport *lport)
1574{
1575 if (!lport->tt.lport_recv)
1576 lport->tt.lport_recv = fc_lport_recv_req;
1577
1578 if (!lport->tt.lport_reset)
1579 lport->tt.lport_reset = fc_lport_reset;
1580
1581 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1582 fc_host_node_name(lport->host) = lport->wwnn;
1583 fc_host_port_name(lport->host) = lport->wwpn;
1584 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1585 memset(fc_host_supported_fc4s(lport->host), 0,
1586 sizeof(fc_host_supported_fc4s(lport->host)));
1587 fc_host_supported_fc4s(lport->host)[2] = 1;
1588 fc_host_supported_fc4s(lport->host)[7] = 1;
1589
1590 /* This value is also unchanging */
1591 memset(fc_host_active_fc4s(lport->host), 0,
1592 sizeof(fc_host_active_fc4s(lport->host)));
1593 fc_host_active_fc4s(lport->host)[2] = 1;
1594 fc_host_active_fc4s(lport->host)[7] = 1;
1595 fc_host_maxframe_size(lport->host) = lport->mfs;
1596 fc_host_supported_speeds(lport->host) = 0;
1597 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1598 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1601
1602 return 0;
1603}
1604EXPORT_SYMBOL(fc_lport_init);
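/*
 * A minimal bring-up sketch tying the exported entry points of this
 * file together; the function name is hypothetical and lport is
 * assumed to have been allocated and attached to a SCSI host by the
 * LLD beforehand (as the fcoe code does). Once this returns, a
 * fc_linkup() from the transport lets the FLOGI actually go out.
 */
static int example_lld_start(struct fc_lport *lport)
{
	fc_lport_config(lport);		/* mutex, retry work, FC-4 type map */
	fc_lport_init(lport);		/* template defaults and fc_host attributes */
	return fc_fabric_login(lport);	/* kicks the state machine; -1 unless in ST_NONE */
}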
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000000..e780d8caf70e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1291 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
27 *
28 * fc_rports represent N_Ports within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time; since they're not critical for the I/O
44 * path, this potential over-use of the mutex is acceptable.
45 */
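/*
 * A sketch of that ordering rule (illustration only): take the greater
 * lock first and release in reverse,
 *
 *	mutex_lock(&lport->lp_mutex);	(lport, greater, first)
 *	mutex_lock(&rdata->rp_mutex);	(rport, lesser, second)
 *	...
 *	mutex_unlock(&rdata->rp_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Acquiring lp_mutex while already holding rp_mutex risks an AB-BA
 * deadlock against code following the documented order.
 */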
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
58static int fc_rport_debug;
59
60#define FC_DEBUG_RPORT(fmt...) \
61 do { \
62 if (fc_rport_debug) \
63 FC_DBG(fmt); \
64 } while (0)
65
66struct workqueue_struct *rport_event_queue;
67
68static void fc_rport_enter_plogi(struct fc_rport *);
69static void fc_rport_enter_prli(struct fc_rport *);
70static void fc_rport_enter_rtv(struct fc_rport *);
71static void fc_rport_enter_ready(struct fc_rport *);
72static void fc_rport_enter_logo(struct fc_rport *);
73
74static void fc_rport_recv_plogi_req(struct fc_rport *,
75 struct fc_seq *, struct fc_frame *);
76static void fc_rport_recv_prli_req(struct fc_rport *,
77 struct fc_seq *, struct fc_frame *);
78static void fc_rport_recv_prlo_req(struct fc_rport *,
79 struct fc_seq *, struct fc_frame *);
80static void fc_rport_recv_logo_req(struct fc_rport *,
81 struct fc_seq *, struct fc_frame *);
82static void fc_rport_timeout(struct work_struct *);
83static void fc_rport_error(struct fc_rport *, struct fc_frame *);
84static void fc_rport_work(struct work_struct *);
85
86static const char *fc_rport_state_names[] = {
87 [RPORT_ST_NONE] = "None",
88 [RPORT_ST_INIT] = "Init",
89 [RPORT_ST_PLOGI] = "PLOGI",
90 [RPORT_ST_PRLI] = "PRLI",
91 [RPORT_ST_RTV] = "RTV",
92 [RPORT_ST_READY] = "Ready",
93 [RPORT_ST_LOGO] = "LOGO",
94};
95
96static void fc_rport_rogue_destroy(struct device *dev)
97{
98 struct fc_rport *rport = dev_to_rport(dev);
99 FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
100 kfree(rport);
101}
102
103struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
104{
105 struct fc_rport *rport;
106 struct fc_rport_libfc_priv *rdata;
107 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
108
109 if (!rport)
110 return NULL;
111
112 rdata = RPORT_TO_PRIV(rport);
113
114 rport->dd_data = rdata;
115 rport->port_id = dp->ids.port_id;
116 rport->port_name = dp->ids.port_name;
117 rport->node_name = dp->ids.node_name;
118 rport->roles = dp->ids.roles;
119 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
120 /*
121 * Note: all this libfc rogue rport code will be removed for
122	 * upstream so it's fine that this is really ugly and hacky right now.
123 */
124 device_initialize(&rport->dev);
125 rport->dev.release = fc_rport_rogue_destroy;
126
127 mutex_init(&rdata->rp_mutex);
128 rdata->local_port = dp->lp;
129 rdata->trans_state = FC_PORTSTATE_ROGUE;
130 rdata->rp_state = RPORT_ST_INIT;
131 rdata->event = RPORT_EV_NONE;
132 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
133 rdata->ops = NULL;
134 rdata->e_d_tov = dp->lp->e_d_tov;
135 rdata->r_a_tov = dp->lp->r_a_tov;
136 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
137 INIT_WORK(&rdata->event_work, fc_rport_work);
138 /*
139 * For good measure, but not necessary as we should only
140 * add REAL rport to the lport list.
141 */
142 INIT_LIST_HEAD(&rdata->peers);
143
144 return rport;
145}
146
147/**
148 * fc_rport_state - return a string for the state the rport is in
149 * @rport: The rport whose state we want to get a string for
150 */
151static const char *fc_rport_state(struct fc_rport *rport)
152{
153 const char *cp;
154 struct fc_rport_libfc_priv *rdata = rport->dd_data;
155
156 cp = fc_rport_state_names[rdata->rp_state];
157 if (!cp)
158 cp = "Unknown";
159 return cp;
160}
161
162/**
163 * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
164 * @rport: Pointer to Fibre Channel remote port structure
165 * @timeout: timeout in seconds
166 */
167void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
168{
169 if (timeout)
170 rport->dev_loss_tmo = timeout + 5;
171 else
172 rport->dev_loss_tmo = 30;
173}
174EXPORT_SYMBOL(fc_set_rport_loss_tmo);
175
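/*
 * Usage sketch: the fc transport class reaches this through the
 * driver's fc_function_template (the set_rport_dev_loss_tmo hook, an
 * assumption here). A zero timeout falls back to a 30 second default;
 * anything else is padded by 5 s, e.g.
 *
 *	fc_set_rport_loss_tmo(rport, 60);	(dev_loss_tmo becomes 65)
 */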
176/**
177 * fc_plogi_get_maxframe - Get max payload from the common service parameters
178 * @flp: FLOGI payload structure
179 * @maxval: upper limit, may be less than what is in the service parameters
180 */
181static unsigned int
182fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
183{
184 unsigned int mfs;
185
186 /*
187 * Get max payload from the common service parameters and the
188 * class 3 receive data field size.
189 */
190 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
191 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
192 maxval = mfs;
193 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
194 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
195 maxval = mfs;
196 return maxval;
197}
198
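/*
 * Worked example: with the peer's sp_bb_data advertising 2112, a
 * class-3 cp_rdfs of 1024 and maxval = lport->mfs = 2112, the result
 * is 1024; fields below FC_SP_MIN_MAX_PAYLOAD (256 by the usual
 * definition, an assumption here) are ignored rather than trusted.
 */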
199/**
200 * fc_rport_state_enter - Change the rport's state
201 * @rport: The rport whose state should change
202 * @new: The new state of the rport
203 *
204 * Locking Note: Called with the rport lock held
205 */
206static void fc_rport_state_enter(struct fc_rport *rport,
207 enum fc_rport_state new)
208{
209 struct fc_rport_libfc_priv *rdata = rport->dd_data;
210 if (rdata->rp_state != new)
211 rdata->retries = 0;
212 rdata->rp_state = new;
213}
214
215static void fc_rport_work(struct work_struct *work)
216{
217 struct fc_rport_libfc_priv *rdata =
218 container_of(work, struct fc_rport_libfc_priv, event_work);
219 enum fc_rport_event event;
220 enum fc_rport_trans_state trans_state;
221 struct fc_lport *lport = rdata->local_port;
222 struct fc_rport_operations *rport_ops;
223 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
224
225 mutex_lock(&rdata->rp_mutex);
226 event = rdata->event;
227 rport_ops = rdata->ops;
228
229 if (event == RPORT_EV_CREATED) {
230 struct fc_rport *new_rport;
231 struct fc_rport_libfc_priv *new_rdata;
232 struct fc_rport_identifiers ids;
233
234 ids.port_id = rport->port_id;
235 ids.roles = rport->roles;
236 ids.port_name = rport->port_name;
237 ids.node_name = rport->node_name;
238
239 mutex_unlock(&rdata->rp_mutex);
240
241 new_rport = fc_remote_port_add(lport->host, 0, &ids);
242 if (new_rport) {
243 /*
244 * Switch from the rogue rport to the rport
245 * returned by the FC class.
246 */
247 new_rport->maxframe_size = rport->maxframe_size;
248
249 new_rdata = new_rport->dd_data;
250 new_rdata->e_d_tov = rdata->e_d_tov;
251 new_rdata->r_a_tov = rdata->r_a_tov;
252 new_rdata->ops = rdata->ops;
253 new_rdata->local_port = rdata->local_port;
254 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
255 new_rdata->trans_state = FC_PORTSTATE_REAL;
256 mutex_init(&new_rdata->rp_mutex);
257 INIT_DELAYED_WORK(&new_rdata->retry_work,
258 fc_rport_timeout);
259 INIT_LIST_HEAD(&new_rdata->peers);
260 INIT_WORK(&new_rdata->event_work, fc_rport_work);
261
262 fc_rport_state_enter(new_rport, RPORT_ST_READY);
263 } else {
264 FC_DBG("Failed to create the rport for port "
265 "(%6x).\n", ids.port_id);
266 event = RPORT_EV_FAILED;
267 }
268 put_device(&rport->dev);
269 rport = new_rport;
270 rdata = new_rport->dd_data;
271 if (rport_ops->event_callback)
272 rport_ops->event_callback(lport, rport, event);
273 } else if ((event == RPORT_EV_FAILED) ||
274 (event == RPORT_EV_LOGO) ||
275 (event == RPORT_EV_STOP)) {
276 trans_state = rdata->trans_state;
277 mutex_unlock(&rdata->rp_mutex);
278 if (rport_ops->event_callback)
279 rport_ops->event_callback(lport, rport, event);
280 if (trans_state == FC_PORTSTATE_ROGUE)
281 put_device(&rport->dev);
282 else
283 fc_remote_port_delete(rport);
284 } else
285 mutex_unlock(&rdata->rp_mutex);
286}
287
288/**
289 * fc_rport_login - Start the remote port login state machine
290 * @rport: Fibre Channel remote port
291 *
292 * Locking Note: Called without the rport lock held. This
293 * function will hold the rport lock, call an _enter_*
294 * function and then unlock the rport.
295 */
296int fc_rport_login(struct fc_rport *rport)
297{
298 struct fc_rport_libfc_priv *rdata = rport->dd_data;
299
300 mutex_lock(&rdata->rp_mutex);
301
302 FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
303
304 fc_rport_enter_plogi(rport);
305
306 mutex_unlock(&rdata->rp_mutex);
307
308 return 0;
309}
310
311/**
312 * fc_rport_logoff - Logoff and remove an rport
313 * @rport: Fibre Channel remote port to be removed
314 *
315 * Locking Note: Called without the rport lock held. This
316 * function will hold the rport lock, call an _enter_*
317 * function and then unlock the rport.
318 */
319int fc_rport_logoff(struct fc_rport *rport)
320{
321 struct fc_rport_libfc_priv *rdata = rport->dd_data;
322
323 mutex_lock(&rdata->rp_mutex);
324
325 FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
326
327 fc_rport_enter_logo(rport);
328
329 /*
330 * Change the state to NONE so that we discard
331 * the response.
332 */
333 fc_rport_state_enter(rport, RPORT_ST_NONE);
334
335 mutex_unlock(&rdata->rp_mutex);
336
337 cancel_delayed_work_sync(&rdata->retry_work);
338
339 mutex_lock(&rdata->rp_mutex);
340
341 rdata->event = RPORT_EV_STOP;
342 queue_work(rport_event_queue, &rdata->event_work);
343
344 mutex_unlock(&rdata->rp_mutex);
345
346 return 0;
347}
348
349/**
350 * fc_rport_enter_ready - The rport is ready
351 * @rport: Fibre Channel remote port that is ready
352 *
353 * Locking Note: The rport lock is expected to be held before calling
354 * this routine.
355 */
356static void fc_rport_enter_ready(struct fc_rport *rport)
357{
358 struct fc_rport_libfc_priv *rdata = rport->dd_data;
359
360 fc_rport_state_enter(rport, RPORT_ST_READY);
361
362 FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
363
364 rdata->event = RPORT_EV_CREATED;
365 queue_work(rport_event_queue, &rdata->event_work);
366}
367
368/**
369 * fc_rport_timeout - Handler for the retry_work timer.
370 * @work: The work struct of the fc_rport_libfc_priv
371 *
372 * Locking Note: Called without the rport lock held. This
373 * function will hold the rport lock, call an _enter_*
374 * function and then unlock the rport.
375 */
376static void fc_rport_timeout(struct work_struct *work)
377{
378 struct fc_rport_libfc_priv *rdata =
379 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
380 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
381
382 mutex_lock(&rdata->rp_mutex);
383
384 switch (rdata->rp_state) {
385 case RPORT_ST_PLOGI:
386 fc_rport_enter_plogi(rport);
387 break;
388 case RPORT_ST_PRLI:
389 fc_rport_enter_prli(rport);
390 break;
391 case RPORT_ST_RTV:
392 fc_rport_enter_rtv(rport);
393 break;
394 case RPORT_ST_LOGO:
395 fc_rport_enter_logo(rport);
396 break;
397 case RPORT_ST_READY:
398 case RPORT_ST_INIT:
399 case RPORT_ST_NONE:
400 break;
401 }
402
403 mutex_unlock(&rdata->rp_mutex);
404 put_device(&rport->dev);
405}
406
407/**
408 * fc_rport_error - Handler for any errors
409 * @rport: The fc_rport object
410 * @fp: The frame pointer
411 *
412 * If the error was caused by a resource allocation failure
413 * then wait for half a second and retry, otherwise retry
414 * immediately.
415 *
416 * Locking Note: The rport lock is expected to be held before
417 * calling this routine
418 */
419static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
420{
421 struct fc_rport_libfc_priv *rdata = rport->dd_data;
422 unsigned long delay = 0;
423
424 FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
425 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
426
427 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
428 /*
429 * Memory allocation failure, or the exchange timed out.
430 * Retry after delay
431 */
432 if (rdata->retries < rdata->local_port->max_retry_count) {
433 rdata->retries++;
434 if (!fp)
435 delay = msecs_to_jiffies(500);
436 get_device(&rport->dev);
437 schedule_delayed_work(&rdata->retry_work, delay);
438 } else {
439 switch (rdata->rp_state) {
440 case RPORT_ST_PLOGI:
441 case RPORT_ST_PRLI:
442 case RPORT_ST_LOGO:
443 rdata->event = RPORT_EV_FAILED;
444 queue_work(rport_event_queue,
445 &rdata->event_work);
446 break;
447 case RPORT_ST_RTV:
448 fc_rport_enter_ready(rport);
449 break;
450 case RPORT_ST_NONE:
451 case RPORT_ST_READY:
452 case RPORT_ST_INIT:
453 break;
454 }
455 }
456 }
457}
458
459/**
460 * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
461 * @sp: current sequence in the PLOGI exchange
462 * @fp: response frame
463 * @rp_arg: Fibre Channel remote port
464 *
465 * Locking Note: This function will be called without the rport lock
466 * held, but it will lock, call an _enter_* function or fc_rport_error
467 * and then unlock the rport.
468 */
469static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
470 void *rp_arg)
471{
472 struct fc_rport *rport = rp_arg;
473 struct fc_rport_libfc_priv *rdata = rport->dd_data;
474 struct fc_lport *lport = rdata->local_port;
475 struct fc_els_flogi *plp;
476 unsigned int tov;
477 u16 csp_seq;
478 u16 cssp_seq;
479 u8 op;
480
481 mutex_lock(&rdata->rp_mutex);
482
483 FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
484 rport->port_id);
485
486 if (rdata->rp_state != RPORT_ST_PLOGI) {
487 FC_DBG("Received a PLOGI response, but in state %s\n",
488 fc_rport_state(rport));
489 goto out;
490 }
491
492 if (IS_ERR(fp)) {
493 fc_rport_error(rport, fp);
494 goto err;
495 }
496
497 op = fc_frame_payload_op(fp);
498 if (op == ELS_LS_ACC &&
499 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
500 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
501 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
502
503 tov = ntohl(plp->fl_csp.sp_e_d_tov);
504 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
505 tov /= 1000;
506 if (tov > rdata->e_d_tov)
507 rdata->e_d_tov = tov;
508 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
509 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
510 if (cssp_seq < csp_seq)
511 csp_seq = cssp_seq;
512 rdata->max_seq = csp_seq;
513 rport->maxframe_size =
514 fc_plogi_get_maxframe(plp, lport->mfs);
515
516 /*
517 * If the rport is one of the well known addresses
518 * we skip PRLI and RTV and go straight to READY.
519 */
520 if (rport->port_id >= FC_FID_DOM_MGR)
521 fc_rport_enter_ready(rport);
522 else
523 fc_rport_enter_prli(rport);
524 } else
525 fc_rport_error(rport, fp);
526
527out:
528 fc_frame_free(fp);
529err:
530 mutex_unlock(&rdata->rp_mutex);
531 put_device(&rport->dev);
532}
533
534/**
535 * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
536 * @rport: Fibre Channel remote port to send PLOGI to
537 *
538 * Locking Note: The rport lock is expected to be held before calling
539 * this routine.
540 */
541static void fc_rport_enter_plogi(struct fc_rport *rport)
542{
543 struct fc_rport_libfc_priv *rdata = rport->dd_data;
544 struct fc_lport *lport = rdata->local_port;
545 struct fc_frame *fp;
546
547 FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
548 rport->port_id, fc_rport_state(rport));
549
550 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
551
552 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
553 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
554 if (!fp) {
555 fc_rport_error(rport, fp);
556 return;
557 }
558 rdata->e_d_tov = lport->e_d_tov;
559
560 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
561 fc_rport_plogi_resp, rport, lport->e_d_tov))
562 fc_rport_error(rport, fp);
563 else
564 get_device(&rport->dev);
565}
566
567/**
568 * fc_rport_prli_resp - Process Login (PRLI) response handler
569 * @sp: current sequence in the PRLI exchange
570 * @fp: response frame
571 * @rp_arg: Fibre Channel remote port
572 *
573 * Locking Note: This function will be called without the rport lock
574 * held, but it will lock, call an _enter_* function or fc_rport_error
575 * and then unlock the rport.
576 */
577static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
578 void *rp_arg)
579{
580 struct fc_rport *rport = rp_arg;
581 struct fc_rport_libfc_priv *rdata = rport->dd_data;
582 struct {
583 struct fc_els_prli prli;
584 struct fc_els_spp spp;
585 } *pp;
586 u32 roles = FC_RPORT_ROLE_UNKNOWN;
587 u32 fcp_parm = 0;
588 u8 op;
589
590 mutex_lock(&rdata->rp_mutex);
591
592 FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
593 rport->port_id);
594
595 if (rdata->rp_state != RPORT_ST_PRLI) {
596 FC_DBG("Received a PRLI response, but in state %s\n",
597 fc_rport_state(rport));
598 goto out;
599 }
600
601 if (IS_ERR(fp)) {
602 fc_rport_error(rport, fp);
603 goto err;
604 }
605
606 op = fc_frame_payload_op(fp);
607 if (op == ELS_LS_ACC) {
608 pp = fc_frame_payload_get(fp, sizeof(*pp));
609 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
610 fcp_parm = ntohl(pp->spp.spp_params);
611 if (fcp_parm & FCP_SPPF_RETRY)
612 rdata->flags |= FC_RP_FLAGS_RETRY;
613 }
614
615 rport->supported_classes = FC_COS_CLASS3;
616 if (fcp_parm & FCP_SPPF_INIT_FCN)
617 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
618 if (fcp_parm & FCP_SPPF_TARG_FCN)
619 roles |= FC_RPORT_ROLE_FCP_TARGET;
620
621 rport->roles = roles;
622 fc_rport_enter_rtv(rport);
623
624 } else {
625 FC_DBG("Bad ELS response\n");
626 rdata->event = RPORT_EV_FAILED;
627 queue_work(rport_event_queue, &rdata->event_work);
628 }
629
630out:
631 fc_frame_free(fp);
632err:
633 mutex_unlock(&rdata->rp_mutex);
634 put_device(&rport->dev);
635}
636
637/**
638 * fc_rport_logo_resp - Logout (LOGO) response handler
639 * @sp: current sequence in the LOGO exchange
640 * @fp: response frame
641 * @rp_arg: Fibre Channel remote port
642 *
643 * Locking Note: This function will be called without the rport lock
644 * held, but it will lock, call an _enter_* function or fc_rport_error
645 * and then unlock the rport.
646 */
647static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
648 void *rp_arg)
649{
650 struct fc_rport *rport = rp_arg;
651 struct fc_rport_libfc_priv *rdata = rport->dd_data;
652 u8 op;
653
654 mutex_lock(&rdata->rp_mutex);
655
656 FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
657 rport->port_id);
658
659 if (IS_ERR(fp)) {
660 fc_rport_error(rport, fp);
661 goto err;
662 }
663
664 if (rdata->rp_state != RPORT_ST_LOGO) {
665 FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
666 fc_rport_state(rport));
667 goto out;
668 }
669
670 op = fc_frame_payload_op(fp);
671 if (op == ELS_LS_ACC) {
672 fc_rport_enter_rtv(rport);
673 } else {
674 FC_DBG("Bad ELS response\n");
675 rdata->event = RPORT_EV_LOGO;
676 queue_work(rport_event_queue, &rdata->event_work);
677 }
678
679out:
680 fc_frame_free(fp);
681err:
682 mutex_unlock(&rdata->rp_mutex);
683 put_device(&rport->dev);
684}
685
686/**
687 * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
688 * @rport: Fibre Channel remote port to send PRLI to
689 *
690 * Locking Note: The rport lock is expected to be held before calling
691 * this routine.
692 */
693static void fc_rport_enter_prli(struct fc_rport *rport)
694{
695 struct fc_rport_libfc_priv *rdata = rport->dd_data;
696 struct fc_lport *lport = rdata->local_port;
697 struct {
698 struct fc_els_prli prli;
699 struct fc_els_spp spp;
700 } *pp;
701 struct fc_frame *fp;
702
703 FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
704 rport->port_id, fc_rport_state(rport));
705
706 fc_rport_state_enter(rport, RPORT_ST_PRLI);
707
708 fp = fc_frame_alloc(lport, sizeof(*pp));
709 if (!fp) {
710 fc_rport_error(rport, fp);
711 return;
712 }
713
714 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
715 fc_rport_prli_resp, rport, lport->e_d_tov))
716 fc_rport_error(rport, fp);
717 else
718 get_device(&rport->dev);
719}
720
721/**
 722 * fc_rport_rtv_resp - Request Timeout Value (RTV) response handler
723 * @sp: current sequence in the RTV exchange
724 * @fp: response frame
725 * @rp_arg: Fibre Channel remote port
726 *
727 * Many targets don't seem to support this.
728 *
729 * Locking Note: This function will be called without the rport lock
730 * held, but it will lock, call an _enter_* function or fc_rport_error
731 * and then unlock the rport.
732 */
733static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
734 void *rp_arg)
735{
736 struct fc_rport *rport = rp_arg;
737 struct fc_rport_libfc_priv *rdata = rport->dd_data;
738 u8 op;
739
740 mutex_lock(&rdata->rp_mutex);
741
 742	FC_DEBUG_RPORT("Received an RTV response from port (%6x)\n",
743 rport->port_id);
744
 745	if (IS_ERR(fp)) {
 746		fc_rport_error(rport, fp);
 747		goto err;
 748	}
 749
 750	if (rdata->rp_state != RPORT_ST_RTV) {
 751		FC_DBG("Received an RTV response, but in state %s\n",
 752		       fc_rport_state(rport));
 753		goto out;
 754	}
755
756 op = fc_frame_payload_op(fp);
757 if (op == ELS_LS_ACC) {
758 struct fc_els_rtv_acc *rtv;
759 u32 toq;
760 u32 tov;
761
762 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
763 if (rtv) {
764 toq = ntohl(rtv->rtv_toq);
765 tov = ntohl(rtv->rtv_r_a_tov);
766 if (tov == 0)
767 tov = 1;
768 rdata->r_a_tov = tov;
769 tov = ntohl(rtv->rtv_e_d_tov);
770 if (toq & FC_ELS_RTV_EDRES)
771 tov /= 1000000;
772 if (tov == 0)
773 tov = 1;
774 rdata->e_d_tov = tov;
775 }
776 }
777
778 fc_rport_enter_ready(rport);
779
780out:
781 fc_frame_free(fp);
782err:
783 mutex_unlock(&rdata->rp_mutex);
784 put_device(&rport->dev);
785}
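
/*
 * Illustrative sketch (not part of this file): the RTV_ACC handling
 * above can be read as a small unit-conversion helper. When the
 * FC_ELS_RTV_EDRES bit is set in the timeout qualifier, the E_D_TOV
 * field is expressed in nanoseconds rather than milliseconds, and a
 * zero timeout is clamped to 1 ms.
 */
static inline u32 fc_rtv_e_d_tov_msecs(u32 toq, u32 raw_e_d_tov)
{
	u32 tov = raw_e_d_tov;

	if (toq & FC_ELS_RTV_EDRES)
		tov /= 1000000;		/* nanoseconds -> milliseconds */
	return tov ? tov : 1;		/* never allow a zero timeout */
}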
786
787/**
788 * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
789 * @rport: Fibre Channel remote port to send RTV to
790 *
791 * Locking Note: The rport lock is expected to be held before calling
792 * this routine.
793 */
794static void fc_rport_enter_rtv(struct fc_rport *rport)
795{
796 struct fc_frame *fp;
797 struct fc_rport_libfc_priv *rdata = rport->dd_data;
798 struct fc_lport *lport = rdata->local_port;
799
800 FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
801 rport->port_id, fc_rport_state(rport));
802
803 fc_rport_state_enter(rport, RPORT_ST_RTV);
804
805 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
806 if (!fp) {
807 fc_rport_error(rport, fp);
808 return;
809 }
810
811 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
812 fc_rport_rtv_resp, rport, lport->e_d_tov))
813 fc_rport_error(rport, fp);
814 else
815 get_device(&rport->dev);
816}
817
818/**
819 * fc_rport_enter_logo - Send Logout (LOGO) request to peer
820 * @rport: Fibre Channel remote port to send LOGO to
821 *
822 * Locking Note: The rport lock is expected to be held before calling
823 * this routine.
824 */
825static void fc_rport_enter_logo(struct fc_rport *rport)
826{
827 struct fc_rport_libfc_priv *rdata = rport->dd_data;
828 struct fc_lport *lport = rdata->local_port;
829 struct fc_frame *fp;
830
831 FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
832 rport->port_id, fc_rport_state(rport));
833
834 fc_rport_state_enter(rport, RPORT_ST_LOGO);
835
836 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
837 if (!fp) {
838 fc_rport_error(rport, fp);
839 return;
840 }
841
842 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
843 fc_rport_logo_resp, rport, lport->e_d_tov))
844 fc_rport_error(rport, fp);
845 else
846 get_device(&rport->dev);
847}
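
/*
 * Illustrative note (sketch, not from this file): all of the
 * fc_rport_enter_* senders above follow the same pattern -- allocate a
 * frame, hand it to the exchange layer with a response handler, and
 * take a reference on the rport only if the send was accepted:
 *
 *	fp = fc_frame_alloc(lport, payload_size);
 *	if (!fp) {
 *		fc_rport_error(rport, fp);
 *		return;
 *	}
 *	if (!lport->tt.elsct_send(lport, rport, fp, els_op,
 *				  resp_handler, rport, lport->e_d_tov))
 *		fc_rport_error(rport, fp);
 *	else
 *		get_device(&rport->dev);
 *
 * The matching put_device() at the end of each response handler
 * balances the reference taken here.
 */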
848
849
850/**
851 * fc_rport_recv_req - Receive a request from a rport
 852 * @sp: current sequence in the exchange
 853 * @fp: request frame
 854 * @rport: Fibre Channel remote port
855 *
856 * Locking Note: Called without the rport lock held. This
857 * function will hold the rport lock, call an _enter_*
858 * function and then unlock the rport.
859 */
860void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
861 struct fc_rport *rport)
862{
863 struct fc_rport_libfc_priv *rdata = rport->dd_data;
864 struct fc_lport *lport = rdata->local_port;
865
866 struct fc_frame_header *fh;
867 struct fc_seq_els_data els_data;
868 u8 op;
869
870 mutex_lock(&rdata->rp_mutex);
871
872 els_data.fp = NULL;
873 els_data.explan = ELS_EXPL_NONE;
874 els_data.reason = ELS_RJT_NONE;
875
876 fh = fc_frame_header_get(fp);
877
878 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
879 op = fc_frame_payload_op(fp);
880 switch (op) {
881 case ELS_PLOGI:
882 fc_rport_recv_plogi_req(rport, sp, fp);
883 break;
884 case ELS_PRLI:
885 fc_rport_recv_prli_req(rport, sp, fp);
886 break;
887 case ELS_PRLO:
888 fc_rport_recv_prlo_req(rport, sp, fp);
889 break;
890 case ELS_LOGO:
891 fc_rport_recv_logo_req(rport, sp, fp);
892 break;
893 case ELS_RRQ:
894 els_data.fp = fp;
895 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
896 break;
897 case ELS_REC:
898 els_data.fp = fp;
899 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
900 break;
901 default:
902 els_data.reason = ELS_RJT_UNSUP;
903 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
904 break;
905 }
906 }
907
908 mutex_unlock(&rdata->rp_mutex);
909}
910
911/**
912 * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
913 * @rport: Fibre Channel remote port that initiated PLOGI
914 * @sp: current sequence in the PLOGI exchange
 915 * @rx_fp: PLOGI request frame
916 *
 917 * Locking Note: The rport lock is expected to be held before calling
918 * this function.
919 */
920static void fc_rport_recv_plogi_req(struct fc_rport *rport,
921 struct fc_seq *sp, struct fc_frame *rx_fp)
922{
923 struct fc_rport_libfc_priv *rdata = rport->dd_data;
924 struct fc_lport *lport = rdata->local_port;
925 struct fc_frame *fp = rx_fp;
926 struct fc_exch *ep;
927 struct fc_frame_header *fh;
928 struct fc_els_flogi *pl;
929 struct fc_seq_els_data rjt_data;
930 u32 sid;
931 u64 wwpn;
932 u64 wwnn;
933 enum fc_els_rjt_reason reject = 0;
934 u32 f_ctl;
935 rjt_data.fp = NULL;
936
937 fh = fc_frame_header_get(fp);
938
939 FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
940 "while in state %s\n", ntoh24(fh->fh_s_id),
941 fc_rport_state(rport));
942
943 sid = ntoh24(fh->fh_s_id);
944 pl = fc_frame_payload_get(fp, sizeof(*pl));
945 if (!pl) {
946 FC_DBG("incoming PLOGI from %x too short\n", sid);
947 WARN_ON(1);
948 /* XXX TBD: send reject? */
949 fc_frame_free(fp);
950 return;
951 }
952 wwpn = get_unaligned_be64(&pl->fl_wwpn);
953 wwnn = get_unaligned_be64(&pl->fl_wwnn);
954
955 /*
956 * If the session was just created, possibly due to the incoming PLOGI,
957 * set the state appropriately and accept the PLOGI.
958 *
959 * If we had also sent a PLOGI, and if the received PLOGI is from a
960 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
961 * "command already in progress".
962 *
963 * XXX TBD: If the session was ready before, the PLOGI should result in
964 * all outstanding exchanges being reset.
965 */
966 switch (rdata->rp_state) {
967 case RPORT_ST_INIT:
968 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
969 "- reject\n", sid, wwpn);
970 reject = ELS_RJT_UNSUP;
971 break;
972 case RPORT_ST_PLOGI:
973 FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
974 sid, rdata->rp_state);
975 if (wwpn < lport->wwpn)
976 reject = ELS_RJT_INPROG;
977 break;
978 case RPORT_ST_PRLI:
979 case RPORT_ST_READY:
980 FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
981 "- ignored for now\n", sid, rdata->rp_state);
982 /* XXX TBD - should reset */
983 break;
984 case RPORT_ST_NONE:
985 default:
986 FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
987 "state %d\n", sid, rdata->rp_state);
988 break;
989 }
990
991 if (reject) {
992 rjt_data.reason = reject;
993 rjt_data.explan = ELS_EXPL_NONE;
994 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
995 fc_frame_free(fp);
996 } else {
997 fp = fc_frame_alloc(lport, sizeof(*pl));
998 if (fp == NULL) {
999 fp = rx_fp;
1000 rjt_data.reason = ELS_RJT_UNAB;
1001 rjt_data.explan = ELS_EXPL_NONE;
1002 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1003 fc_frame_free(fp);
1004 } else {
1005 sp = lport->tt.seq_start_next(sp);
1006 WARN_ON(!sp);
1007 fc_rport_set_name(rport, wwpn, wwnn);
1008
1009 /*
1010 * Get session payload size from incoming PLOGI.
1011 */
1012 rport->maxframe_size =
1013 fc_plogi_get_maxframe(pl, lport->mfs);
1014 fc_frame_free(rx_fp);
1015 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1016
1017 /*
1018 * Send LS_ACC. If this fails,
1019 * the originator should retry.
1020 */
1021 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1022 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1023 ep = fc_seq_exch(sp);
1024 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1025 FC_TYPE_ELS, f_ctl, 0);
1026 lport->tt.seq_send(lport, sp, fp);
1027 if (rdata->rp_state == RPORT_ST_PLOGI)
1028 fc_rport_enter_prli(rport);
1029 }
1030 }
1031}
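
/*
 * Illustrative sketch (not part of this file): the PLOGI collision
 * rule applied in RPORT_ST_PLOGI above reduces to a single WWPN
 * comparison -- the port with the higher WWPN wins, and the loser's
 * request is answered with LS_RJT "command already in progress":
 */
static inline bool fc_plogi_collision_reject(u64 local_wwpn, u64 remote_wwpn)
{
	/* reject the incoming PLOGI when the remote port has the lower WWPN */
	return remote_wwpn < local_wwpn;	/* -> ELS_RJT_INPROG */
}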
1032
1033/**
1034 * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
1035 * @rport: Fibre Channel remote port that initiated PRLI
1036 * @sp: current sequence in the PRLI exchange
1037 * @rx_fp: PRLI request frame
1038 *
1039 * Locking Note: The rport lock is expected to be held before calling
1040 * this function.
1041 */
1042static void fc_rport_recv_prli_req(struct fc_rport *rport,
1043 struct fc_seq *sp, struct fc_frame *rx_fp)
1044{
1045 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1046 struct fc_lport *lport = rdata->local_port;
1047 struct fc_exch *ep;
1048 struct fc_frame *fp;
1049 struct fc_frame_header *fh;
1050 struct {
1051 struct fc_els_prli prli;
1052 struct fc_els_spp spp;
1053 } *pp;
1054 struct fc_els_spp *rspp; /* request service param page */
1055 struct fc_els_spp *spp; /* response spp */
1056 unsigned int len;
1057 unsigned int plen;
1058 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1059 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1060 enum fc_els_spp_resp resp;
1061 struct fc_seq_els_data rjt_data;
1062 u32 f_ctl;
1063 u32 fcp_parm;
1064 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1065 rjt_data.fp = NULL;
1066
1067 fh = fc_frame_header_get(rx_fp);
1068
1069 FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
1070 "while in state %s\n", ntoh24(fh->fh_s_id),
1071 fc_rport_state(rport));
1072
1073 switch (rdata->rp_state) {
1074 case RPORT_ST_PRLI:
1075 case RPORT_ST_READY:
1076 reason = ELS_RJT_NONE;
1077 break;
1078 default:
1079 break;
1080 }
1081 len = fr_len(rx_fp) - sizeof(*fh);
1082 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1083 if (pp == NULL) {
1084 reason = ELS_RJT_PROT;
1085 explan = ELS_EXPL_INV_LEN;
1086 } else {
1087 plen = ntohs(pp->prli.prli_len);
1088 if ((plen % 4) != 0 || plen > len) {
1089 reason = ELS_RJT_PROT;
1090 explan = ELS_EXPL_INV_LEN;
1091 } else if (plen < len) {
1092 len = plen;
1093 }
1094 plen = pp->prli.prli_spp_len;
1095 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1096 plen > len || len < sizeof(*pp)) {
1097 reason = ELS_RJT_PROT;
1098 explan = ELS_EXPL_INV_LEN;
1099 }
1100 rspp = &pp->spp;
1101 }
1102 if (reason != ELS_RJT_NONE ||
1103 (fp = fc_frame_alloc(lport, len)) == NULL) {
1104 rjt_data.reason = reason;
1105 rjt_data.explan = explan;
1106 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1107 } else {
1108 sp = lport->tt.seq_start_next(sp);
1109 WARN_ON(!sp);
1110 pp = fc_frame_payload_get(fp, len);
1111 WARN_ON(!pp);
1112 memset(pp, 0, len);
1113 pp->prli.prli_cmd = ELS_LS_ACC;
1114 pp->prli.prli_spp_len = plen;
1115 pp->prli.prli_len = htons(len);
1116 len -= sizeof(struct fc_els_prli);
1117
1118 /*
1119 * Go through all the service parameter pages and build
1120 * response. If plen indicates longer SPP than standard,
1121 * use that. The entire response has been pre-cleared above.
1122 */
1123 spp = &pp->spp;
1124 while (len >= plen) {
1125 spp->spp_type = rspp->spp_type;
1126 spp->spp_type_ext = rspp->spp_type_ext;
1127 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1128 resp = FC_SPP_RESP_ACK;
1129 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1130 resp = FC_SPP_RESP_NO_PA;
1131 switch (rspp->spp_type) {
1132 case 0: /* common to all FC-4 types */
1133 break;
1134 case FC_TYPE_FCP:
1135 fcp_parm = ntohl(rspp->spp_params);
1136			if (fcp_parm & FCP_SPPF_RETRY)
1137 rdata->flags |= FC_RP_FLAGS_RETRY;
1138 rport->supported_classes = FC_COS_CLASS3;
1139 if (fcp_parm & FCP_SPPF_INIT_FCN)
1140 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1141 if (fcp_parm & FCP_SPPF_TARG_FCN)
1142 roles |= FC_RPORT_ROLE_FCP_TARGET;
1143 rport->roles = roles;
1144
1145 spp->spp_params =
1146 htonl(lport->service_params);
1147 break;
1148 default:
1149 resp = FC_SPP_RESP_INVL;
1150 break;
1151 }
1152 spp->spp_flags |= resp;
1153 len -= plen;
1154 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1155 spp = (struct fc_els_spp *)((char *)spp + plen);
1156 }
1157
1158 /*
1159 * Send LS_ACC. If this fails, the originator should retry.
1160 */
1161 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1162 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1163 ep = fc_seq_exch(sp);
1164 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1165 FC_TYPE_ELS, f_ctl, 0);
1166 lport->tt.seq_send(lport, sp, fp);
1167
1168 /*
1169 * Get lock and re-check state.
1170 */
1171 switch (rdata->rp_state) {
1172 case RPORT_ST_PRLI:
1173 fc_rport_enter_ready(rport);
1174 break;
1175 case RPORT_ST_READY:
1176 break;
1177 default:
1178 break;
1179 }
1180 }
1181 fc_frame_free(rx_fp);
1182}
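
/*
 * Illustrative note (sketch, not from this file): a PRLI payload is a
 * 4-byte struct fc_els_prli header followed by one or more service
 * parameter pages, each prli_spp_len bytes long and a multiple of 4:
 *
 *	+------------------------------+
 *	| fc_els_prli (cmd, lengths)   |
 *	+------------------------------+
 *	| spp #0 (e.g. FC_TYPE_FCP)    |
 *	+------------------------------+
 *	| spp #1 ...                   |
 *	+------------------------------+
 *
 * which is why the response loop above advances both rspp and spp by
 * plen on each iteration.
 */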
1183
1184/**
1185 * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
1186 * @rport: Fibre Channel remote port that initiated PRLO
1187 * @sp: current sequence in the PRLO exchange
1188 * @fp: PRLO request frame
1189 *
1190 * Locking Note: The rport lock is expected to be held before calling
1191 * this function.
1192 */
1193static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1194 struct fc_frame *fp)
1195{
1196 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1197 struct fc_lport *lport = rdata->local_port;
1198
1199 struct fc_frame_header *fh;
1200 struct fc_seq_els_data rjt_data;
1201
1202 fh = fc_frame_header_get(fp);
1203
1204 FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
1205 "while in state %s\n", ntoh24(fh->fh_s_id),
1206 fc_rport_state(rport));
1207
1208 rjt_data.fp = NULL;
1209 rjt_data.reason = ELS_RJT_UNAB;
1210 rjt_data.explan = ELS_EXPL_NONE;
1211 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1212 fc_frame_free(fp);
1213}
1214
1215/**
1216 * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
1217 * @rport: Fibre Channel remote port that initiated LOGO
1218 * @sp: current sequence in the LOGO exchange
1219 * @fp: LOGO request frame
1220 *
1221 * Locking Note: The rport lock is expected to be held before calling
1222 * this function.
1223 */
1224static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1225 struct fc_frame *fp)
1226{
1227 struct fc_frame_header *fh;
1228 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1229 struct fc_lport *lport = rdata->local_port;
1230
1231 fh = fc_frame_header_get(fp);
1232
1233 FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
1234 "while in state %s\n", ntoh24(fh->fh_s_id),
1235 fc_rport_state(rport));
1236
1237 rdata->event = RPORT_EV_LOGO;
1238 queue_work(rport_event_queue, &rdata->event_work);
1239
1240 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1241 fc_frame_free(fp);
1242}
1243
1244static void fc_rport_flush_queue(void)
1245{
1246 flush_workqueue(rport_event_queue);
1247}
1248
1249
1250int fc_rport_init(struct fc_lport *lport)
1251{
1252 if (!lport->tt.rport_login)
1253 lport->tt.rport_login = fc_rport_login;
1254
1255 if (!lport->tt.rport_logoff)
1256 lport->tt.rport_logoff = fc_rport_logoff;
1257
1258 if (!lport->tt.rport_recv_req)
1259 lport->tt.rport_recv_req = fc_rport_recv_req;
1260
1261 if (!lport->tt.rport_flush_queue)
1262 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(fc_rport_init);
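
/*
 * Illustrative sketch (hypothetical LLD code, not from this file): a
 * driver may pre-fill any tt.rport_* hook it wants to override before
 * calling fc_rport_init(), which only supplies defaults for handlers
 * left NULL. example_rport_login is an assumed driver function here.
 */
static int example_lport_config(struct fc_lport *lport)
{
	lport->tt.rport_login = example_rport_login;	/* driver override */
	return fc_rport_init(lport);			/* default the rest */
}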
1267
1268int fc_setup_rport(void)
1269{
1270 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1271 if (!rport_event_queue)
1272 return -ENOMEM;
1273 return 0;
1274}
1275EXPORT_SYMBOL(fc_setup_rport);
1276
1277void fc_destroy_rport(void)
1278{
1279 destroy_workqueue(rport_event_queue);
1280}
1281EXPORT_SYMBOL(fc_destroy_rport);
1282
1283void fc_rport_terminate_io(struct fc_rport *rport)
1284{
1285 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1286 struct fc_lport *lport = rdata->local_port;
1287
1288 lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
1289 lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
1290}
1291EXPORT_SYMBOL(fc_rport_terminate_io);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3fdee7370ccc..7225b6e2029e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -88,34 +88,47 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
88} 88}
89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 89EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
90 90
91void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task, 91/**
92 struct iscsi_data *hdr) 92 * iscsi_prep_data_out_pdu - initialize Data-Out
93 * @task: scsi command task
94 * @r2t: R2T info
 95 * @hdr: iscsi Data-Out pdu
96 *
97 * Notes:
 98 * Initializes Data-Out within this R2T sequence and finds
99 * proper data_offset within this SCSI command.
100 *
101 * This function is called with connection lock taken.
102 **/
103void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
104 struct iscsi_data *hdr)
93{ 105{
94 struct iscsi_conn *conn = task->conn; 106 struct iscsi_conn *conn = task->conn;
107 unsigned int left = r2t->data_length - r2t->sent;
108
109 task->hdr_len = sizeof(struct iscsi_data);
95 110
96 memset(hdr, 0, sizeof(struct iscsi_data)); 111 memset(hdr, 0, sizeof(struct iscsi_data));
97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 112 hdr->ttt = r2t->ttt;
98 hdr->datasn = cpu_to_be32(task->unsol_datasn); 113 hdr->datasn = cpu_to_be32(r2t->datasn);
99 task->unsol_datasn++; 114 r2t->datasn++;
100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 115 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); 116 memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
102 117 hdr->itt = task->hdr_itt;
103 hdr->itt = task->hdr->itt; 118 hdr->exp_statsn = r2t->exp_statsn;
104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 119 hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
105 hdr->offset = cpu_to_be32(task->unsol_offset); 120 if (left > conn->max_xmit_dlength) {
106
107 if (task->unsol_count > conn->max_xmit_dlength) {
108 hton24(hdr->dlength, conn->max_xmit_dlength); 121 hton24(hdr->dlength, conn->max_xmit_dlength);
109 task->data_count = conn->max_xmit_dlength; 122 r2t->data_count = conn->max_xmit_dlength;
110 task->unsol_offset += task->data_count;
111 hdr->flags = 0; 123 hdr->flags = 0;
112 } else { 124 } else {
113 hton24(hdr->dlength, task->unsol_count); 125 hton24(hdr->dlength, left);
114 task->data_count = task->unsol_count; 126 r2t->data_count = left;
115 hdr->flags = ISCSI_FLAG_CMD_FINAL; 127 hdr->flags = ISCSI_FLAG_CMD_FINAL;
116 } 128 }
129 conn->dataout_pdus_cnt++;
117} 130}
118EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 131EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
119 132
120static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) 133static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
121{ 134{
@@ -206,11 +219,24 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
206{ 219{
207 struct iscsi_conn *conn = task->conn; 220 struct iscsi_conn *conn = task->conn;
208 struct iscsi_session *session = conn->session; 221 struct iscsi_session *session = conn->session;
209 struct iscsi_cmd *hdr = task->hdr;
210 struct scsi_cmnd *sc = task->sc; 222 struct scsi_cmnd *sc = task->sc;
223 struct iscsi_cmd *hdr;
211 unsigned hdrlength, cmd_len; 224 unsigned hdrlength, cmd_len;
225 itt_t itt;
212 int rc; 226 int rc;
213 227
228 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
229 if (rc)
230 return rc;
231 hdr = (struct iscsi_cmd *) task->hdr;
232 itt = hdr->itt;
233 memset(hdr, 0, sizeof(*hdr));
234
235 if (session->tt->parse_pdu_itt)
236 hdr->itt = task->hdr_itt = itt;
237 else
238 hdr->itt = task->hdr_itt = build_itt(task->itt,
239 task->conn->session->age);
214 task->hdr_len = 0; 240 task->hdr_len = 0;
215 rc = iscsi_add_hdr(task, sizeof(*hdr)); 241 rc = iscsi_add_hdr(task, sizeof(*hdr));
216 if (rc) 242 if (rc)
@@ -218,8 +244,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
218 hdr->opcode = ISCSI_OP_SCSI_CMD; 244 hdr->opcode = ISCSI_OP_SCSI_CMD;
219 hdr->flags = ISCSI_ATTR_SIMPLE; 245 hdr->flags = ISCSI_ATTR_SIMPLE;
220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 246 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
221 hdr->itt = build_itt(task->itt, session->age); 247 memcpy(task->lun, hdr->lun, sizeof(task->lun));
222 hdr->cmdsn = cpu_to_be32(session->cmdsn); 248 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
223 session->cmdsn++; 249 session->cmdsn++;
224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 250 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
225 cmd_len = sc->cmd_len; 251 cmd_len = sc->cmd_len;
@@ -242,6 +268,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
242 } 268 }
243 if (sc->sc_data_direction == DMA_TO_DEVICE) { 269 if (sc->sc_data_direction == DMA_TO_DEVICE) {
244 unsigned out_len = scsi_out(sc)->length; 270 unsigned out_len = scsi_out(sc)->length;
271 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
272
245 hdr->data_length = cpu_to_be32(out_len); 273 hdr->data_length = cpu_to_be32(out_len);
246 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 274 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
247 /* 275 /*
@@ -254,13 +282,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
254 * without R2T ack right after 282 * without R2T ack right after
255 * immediate data 283 * immediate data
256 * 284 *
257 * r2t_data_count bytes to be sent via R2T ack's 285 * r2t data_length bytes to be sent via R2T ack's
258 * 286 *
259 * pad_count bytes to be sent as zero-padding 287 * pad_count bytes to be sent as zero-padding
260 */ 288 */
261 task->unsol_count = 0; 289 memset(r2t, 0, sizeof(*r2t));
262 task->unsol_offset = 0;
263 task->unsol_datasn = 0;
264 290
265 if (session->imm_data_en) { 291 if (session->imm_data_en) {
266 if (out_len >= session->first_burst) 292 if (out_len >= session->first_burst)
@@ -274,12 +300,14 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
274 zero_data(hdr->dlength); 300 zero_data(hdr->dlength);
275 301
276 if (!session->initial_r2t_en) { 302 if (!session->initial_r2t_en) {
277 task->unsol_count = min(session->first_burst, out_len) 303 r2t->data_length = min(session->first_burst, out_len) -
278 - task->imm_count; 304 task->imm_count;
279 task->unsol_offset = task->imm_count; 305 r2t->data_offset = task->imm_count;
306 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
307 r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
280 } 308 }
281 309
282 if (!task->unsol_count) 310 if (!task->unsol_r2t.data_length)
 283 /* No unsolicited Data-Out's */ 311 /* No unsolicited Data-Out's */
284 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 312 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
285 } else { 313 } else {
@@ -300,8 +328,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
300 WARN_ON(hdrlength >= 256); 328 WARN_ON(hdrlength >= 256);
301 hdr->hlength = hdrlength & 0xFF; 329 hdr->hlength = hdrlength & 0xFF;
302 330
303 if (conn->session->tt->init_task && 331 if (session->tt->init_task && session->tt->init_task(task))
304 conn->session->tt->init_task(task))
305 return -EIO; 332 return -EIO;
306 333
307 task->state = ISCSI_TASK_RUNNING; 334 task->state = ISCSI_TASK_RUNNING;
@@ -332,6 +359,7 @@ static void iscsi_complete_command(struct iscsi_task *task)
332 struct iscsi_session *session = conn->session; 359 struct iscsi_session *session = conn->session;
333 struct scsi_cmnd *sc = task->sc; 360 struct scsi_cmnd *sc = task->sc;
334 361
362 session->tt->cleanup_task(task);
335 list_del_init(&task->running); 363 list_del_init(&task->running);
336 task->state = ISCSI_TASK_COMPLETED; 364 task->state = ISCSI_TASK_COMPLETED;
337 task->sc = NULL; 365 task->sc = NULL;
@@ -402,8 +430,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
402 * the cmd in the sequencing 430 * the cmd in the sequencing
403 */ 431 */
404 conn->session->queued_cmdsn--; 432 conn->session->queued_cmdsn--;
405 else
406 conn->session->tt->cleanup_task(conn, task);
407 433
408 sc->result = err; 434 sc->result = err;
409 if (!scsi_bidi_cmnd(sc)) 435 if (!scsi_bidi_cmnd(sc))
@@ -423,7 +449,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
423 struct iscsi_task *task) 449 struct iscsi_task *task)
424{ 450{
425 struct iscsi_session *session = conn->session; 451 struct iscsi_session *session = conn->session;
426 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr; 452 struct iscsi_hdr *hdr = task->hdr;
427 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; 453 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
428 454
429 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 455 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -437,7 +463,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
437 */ 463 */
438 nop->cmdsn = cpu_to_be32(session->cmdsn); 464 nop->cmdsn = cpu_to_be32(session->cmdsn);
439 if (hdr->itt != RESERVED_ITT) { 465 if (hdr->itt != RESERVED_ITT) {
440 hdr->itt = build_itt(task->itt, session->age);
441 /* 466 /*
442 * TODO: We always use immediate, so we never hit this. 467 * TODO: We always use immediate, so we never hit this.
443 * If we start to send tmfs or nops as non-immediate then 468 * If we start to send tmfs or nops as non-immediate then
@@ -450,12 +475,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
450 } 475 }
451 } 476 }
452 477
453 if (session->tt->init_task) 478 if (session->tt->init_task && session->tt->init_task(task))
454 session->tt->init_task(task); 479 return -EIO;
455 480
456 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 481 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
457 session->state = ISCSI_STATE_LOGGING_OUT; 482 session->state = ISCSI_STATE_LOGGING_OUT;
458 483
484 task->state = ISCSI_TASK_RUNNING;
459 list_move_tail(&task->running, &conn->mgmt_run_list); 485 list_move_tail(&task->running, &conn->mgmt_run_list);
460 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", 486 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
461 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, 487 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
@@ -469,6 +495,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
469{ 495{
470 struct iscsi_session *session = conn->session; 496 struct iscsi_session *session = conn->session;
471 struct iscsi_task *task; 497 struct iscsi_task *task;
498 itt_t itt;
472 499
473 if (session->state == ISCSI_STATE_TERMINATE) 500 if (session->state == ISCSI_STATE_TERMINATE)
474 return NULL; 501 return NULL;
@@ -505,23 +532,47 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
505 } else 532 } else
506 task->data_count = 0; 533 task->data_count = 0;
507 534
535 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
536 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
537 "pdu for mgmt task.\n");
538 goto requeue_task;
539 }
540 itt = task->hdr->itt;
541 task->hdr_len = sizeof(struct iscsi_hdr);
508 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 542 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
543
544 if (hdr->itt != RESERVED_ITT) {
545 if (session->tt->parse_pdu_itt)
546 task->hdr->itt = itt;
547 else
548 task->hdr->itt = build_itt(task->itt,
549 task->conn->session->age);
550 }
551
509 INIT_LIST_HEAD(&task->running); 552 INIT_LIST_HEAD(&task->running);
510 list_add_tail(&task->running, &conn->mgmtqueue); 553 list_add_tail(&task->running, &conn->mgmtqueue);
511 554
512 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 555 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
513 if (iscsi_prep_mgmt_task(conn, task)) { 556 if (iscsi_prep_mgmt_task(conn, task))
514 __iscsi_put_task(task); 557 goto free_task;
515 return NULL;
516 }
517 558
518 if (session->tt->xmit_task(task)) 559 if (session->tt->xmit_task(task))
519 task = NULL; 560 goto free_task;
520 561
521 } else 562 } else
522 scsi_queue_work(conn->session->host, &conn->xmitwork); 563 scsi_queue_work(conn->session->host, &conn->xmitwork);
523 564
524 return task; 565 return task;
566
567free_task:
568 __iscsi_put_task(task);
569 return NULL;
570
571requeue_task:
572 if (task != conn->login_task)
573 __kfifo_put(session->cmdpool.queue, (void*)&task,
574 sizeof(void*));
575 return NULL;
525} 576}
526 577
527int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 578int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -709,7 +760,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
709{ 760{
710 struct iscsi_reject *reject = (struct iscsi_reject *)hdr; 761 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
711 struct iscsi_hdr rejected_pdu; 762 struct iscsi_hdr rejected_pdu;
712 uint32_t itt;
713 763
714 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; 764 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
715 765
@@ -719,10 +769,9 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
719 769
720 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 770 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
721 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 771 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
722 itt = get_itt(rejected_pdu.itt);
723 iscsi_conn_printk(KERN_ERR, conn, 772 iscsi_conn_printk(KERN_ERR, conn,
724 "itt 0x%x had pdu (op 0x%x) rejected " 773 "pdu (op 0x%x) rejected "
725 "due to DataDigest error.\n", itt, 774 "due to DataDigest error.\n",
726 rejected_pdu.opcode); 775 rejected_pdu.opcode);
727 } 776 }
728 } 777 }
@@ -742,12 +791,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
742static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 791static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
743{ 792{
744 struct iscsi_session *session = conn->session; 793 struct iscsi_session *session = conn->session;
745 uint32_t i; 794 int i;
746 795
747 if (itt == RESERVED_ITT) 796 if (itt == RESERVED_ITT)
748 return NULL; 797 return NULL;
749 798
750 i = get_itt(itt); 799 if (session->tt->parse_pdu_itt)
800 session->tt->parse_pdu_itt(conn, itt, &i, NULL);
801 else
802 i = get_itt(itt);
751 if (i >= session->cmds_max) 803 if (i >= session->cmds_max)
752 return NULL; 804 return NULL;
753 805
@@ -922,20 +974,25 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
922int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) 974int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
923{ 975{
924 struct iscsi_session *session = conn->session; 976 struct iscsi_session *session = conn->session;
925 uint32_t i; 977 int age = 0, i = 0;
926 978
927 if (itt == RESERVED_ITT) 979 if (itt == RESERVED_ITT)
928 return 0; 980 return 0;
929 981
930 if (((__force u32)itt & ISCSI_AGE_MASK) != 982 if (session->tt->parse_pdu_itt)
931 (session->age << ISCSI_AGE_SHIFT)) { 983 session->tt->parse_pdu_itt(conn, itt, &i, &age);
984 else {
985 i = get_itt(itt);
986 age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
987 }
988
989 if (age != session->age) {
932 iscsi_conn_printk(KERN_ERR, conn, 990 iscsi_conn_printk(KERN_ERR, conn,
933 "received itt %x expected session age (%x)\n", 991 "received itt %x expected session age (%x)\n",
934 (__force u32)itt, session->age); 992 (__force u32)itt, session->age);
935 return ISCSI_ERR_BAD_ITT; 993 return ISCSI_ERR_BAD_ITT;
936 } 994 }
937 995
938 i = get_itt(itt);
939 if (i >= session->cmds_max) { 996 if (i >= session->cmds_max) {
940 iscsi_conn_printk(KERN_ERR, conn, 997 iscsi_conn_printk(KERN_ERR, conn,
941 "received invalid itt index %u (max cmds " 998 "received invalid itt index %u (max cmds "
@@ -1136,8 +1193,13 @@ check_mgmt:
1136 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1193 fail_command(conn, conn->task, DID_IMM_RETRY << 16);
1137 continue; 1194 continue;
1138 } 1195 }
1139 if (iscsi_prep_scsi_cmd_pdu(conn->task)) { 1196 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1140 fail_command(conn, conn->task, DID_ABORT << 16); 1197 if (rc) {
1198 if (rc == -ENOMEM) {
1199 conn->task = NULL;
1200 goto again;
1201 } else
1202 fail_command(conn, conn->task, DID_ABORT << 16);
1141 continue; 1203 continue;
1142 } 1204 }
1143 rc = iscsi_xmit_task(conn); 1205 rc = iscsi_xmit_task(conn);
@@ -1195,6 +1257,26 @@ static void iscsi_xmitworker(struct work_struct *work)
1195 } while (rc >= 0 || rc == -EAGAIN); 1257 } while (rc >= 0 || rc == -EAGAIN);
1196} 1258}
1197 1259
1260static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1261 struct scsi_cmnd *sc)
1262{
1263 struct iscsi_task *task;
1264
1265 if (!__kfifo_get(conn->session->cmdpool.queue,
1266 (void *) &task, sizeof(void *)))
1267 return NULL;
1268
1269 sc->SCp.phase = conn->session->age;
1270 sc->SCp.ptr = (char *) task;
1271
1272 atomic_set(&task->refcount, 1);
1273 task->state = ISCSI_TASK_PENDING;
1274 task->conn = conn;
1275 task->sc = sc;
1276 INIT_LIST_HEAD(&task->running);
1277 return task;
1278}
1279
1198enum { 1280enum {
1199 FAILURE_BAD_HOST = 1, 1281 FAILURE_BAD_HOST = 1,
1200 FAILURE_SESSION_FAILED, 1282 FAILURE_SESSION_FAILED,
@@ -1281,33 +1363,27 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1281 goto reject; 1363 goto reject;
1282 } 1364 }
1283 1365
1284 if (!__kfifo_get(session->cmdpool.queue, (void*)&task, 1366 task = iscsi_alloc_task(conn, sc);
1285 sizeof(void*))) { 1367 if (!task) {
1286 reason = FAILURE_OOM; 1368 reason = FAILURE_OOM;
1287 goto reject; 1369 goto reject;
1288 } 1370 }
1289 sc->SCp.phase = session->age;
1290 sc->SCp.ptr = (char *)task;
1291
1292 atomic_set(&task->refcount, 1);
1293 task->state = ISCSI_TASK_PENDING;
1294 task->conn = conn;
1295 task->sc = sc;
1296 INIT_LIST_HEAD(&task->running);
1297 list_add_tail(&task->running, &conn->xmitqueue); 1371 list_add_tail(&task->running, &conn->xmitqueue);
1298 1372
1299 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1373 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
1300 if (iscsi_prep_scsi_cmd_pdu(task)) { 1374 reason = iscsi_prep_scsi_cmd_pdu(task);
1301 sc->result = DID_ABORT << 16; 1375 if (reason) {
1302 sc->scsi_done = NULL; 1376 if (reason == -ENOMEM) {
1303 iscsi_complete_command(task); 1377 reason = FAILURE_OOM;
1304 goto fault; 1378 goto prepd_reject;
1379 } else {
1380 sc->result = DID_ABORT << 16;
1381 goto prepd_fault;
1382 }
1305 } 1383 }
1306 if (session->tt->xmit_task(task)) { 1384 if (session->tt->xmit_task(task)) {
1307 sc->scsi_done = NULL;
1308 iscsi_complete_command(task);
1309 reason = FAILURE_SESSION_NOT_READY; 1385 reason = FAILURE_SESSION_NOT_READY;
1310 goto reject; 1386 goto prepd_reject;
1311 } 1387 }
1312 } else 1388 } else
1313 scsi_queue_work(session->host, &conn->xmitwork); 1389 scsi_queue_work(session->host, &conn->xmitwork);
@@ -1317,12 +1393,18 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1317 spin_lock(host->host_lock); 1393 spin_lock(host->host_lock);
1318 return 0; 1394 return 0;
1319 1395
1396prepd_reject:
1397 sc->scsi_done = NULL;
1398 iscsi_complete_command(task);
1320reject: 1399reject:
1321 spin_unlock(&session->lock); 1400 spin_unlock(&session->lock);
1322 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1401 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1323 spin_lock(host->host_lock); 1402 spin_lock(host->host_lock);
1324 return SCSI_MLQUEUE_TARGET_BUSY; 1403 return SCSI_MLQUEUE_TARGET_BUSY;
1325 1404
1405prepd_fault:
1406 sc->scsi_done = NULL;
1407 iscsi_complete_command(task);
1326fault: 1408fault:
1327 spin_unlock(&session->lock); 1409 spin_unlock(&session->lock);
1328 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); 1410 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
@@ -1634,9 +1716,9 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
1634 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 1716 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1635 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; 1717 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1636 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 1718 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1637 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); 1719 memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
1638 hdr->rtt = task->hdr->itt; 1720 hdr->rtt = task->hdr_itt;
1639 hdr->refcmdsn = task->hdr->cmdsn; 1721 hdr->refcmdsn = task->cmdsn;
1640} 1722}
1641 1723
1642int iscsi_eh_abort(struct scsi_cmnd *sc) 1724int iscsi_eh_abort(struct scsi_cmnd *sc)
@@ -2223,7 +2305,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2223 } 2305 }
2224 spin_unlock_bh(&session->lock); 2306 spin_unlock_bh(&session->lock);
2225 2307
2226 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); 2308 data = (char *) __get_free_pages(GFP_KERNEL,
2309 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2227 if (!data) 2310 if (!data)
2228 goto login_task_data_alloc_fail; 2311 goto login_task_data_alloc_fail;
2229 conn->login_task->data = conn->data = data; 2312 conn->login_task->data = conn->data = data;
@@ -2294,7 +2377,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2294 iscsi_suspend_tx(conn); 2377 iscsi_suspend_tx(conn);
2295 2378
2296 spin_lock_bh(&session->lock); 2379 spin_lock_bh(&session->lock);
2297 kfree(conn->data); 2380 free_pages((unsigned long) conn->data,
2381 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2298 kfree(conn->persistent_address); 2382 kfree(conn->persistent_address);
2299 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2383 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
2300 sizeof(void*)); 2384 sizeof(void*));
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
new file mode 100644
index 000000000000..a745f91d2928
--- /dev/null
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -0,0 +1,1163 @@
1/*
2 * iSCSI over TCP/IP Data-Path lib
3 *
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 - 2006 Mike Christie
7 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
8 * maintained by open-iscsi@googlegroups.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published
12 * by the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * See the file COPYING included with this distribution for more details.
21 *
22 * Credits:
23 * Christoph Hellwig
24 * FUJITA Tomonori
25 * Arne Redlich
26 * Zhenyu Wang
27 */
28
29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/inet.h>
32#include <linux/file.h>
33#include <linux/blkdev.h>
34#include <linux/crypto.h>
35#include <linux/delay.h>
36#include <linux/kfifo.h>
37#include <linux/scatterlist.h>
38#include <net/tcp.h>
39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_host.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_transport_iscsi.h>
44
45#include "iscsi_tcp.h"
46
47MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
48 "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL");
52#undef DEBUG_TCP
53
54#ifdef DEBUG_TCP
55#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
56#else
57#define debug_tcp(fmt...)
58#endif
59
60static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
61 struct iscsi_segment *segment);
62
63/*
64 * Scatterlist handling: inside the iscsi_segment, we
65 * remember an index into the scatterlist, and set data/size
66 * to the current scatterlist entry. For highmem pages, we
67 * kmap as needed.
68 *
69 * Note that the page is unmapped when we return from
70 * TCP's data_ready handler, so we may end up mapping and
71 * unmapping the same page repeatedly. The whole reason
72 * for this is that we shouldn't keep the page mapped
73 * outside the softirq.
74 */
75
76/**
77 * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
78 * @segment: the buffer object
79 * @sg: scatterlist
80 * @offset: byte offset into that sg entry
81 *
82 * This function sets up the segment so that subsequent
83 * data is copied to the indicated sg entry, at the given
84 * offset.
85 */
86static inline void
87iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
88 struct scatterlist *sg, unsigned int offset)
89{
90 segment->sg = sg;
91 segment->sg_offset = offset;
92 segment->size = min(sg->length - offset,
93 segment->total_size - segment->total_copied);
94 segment->data = NULL;
95}
96
97/**
98 * iscsi_tcp_segment_map - map the current S/G page
99 * @segment: iscsi_segment
100 * @recv: 1 if called from recv path
101 *
102 * We only need to possibly kmap data if scatter lists are being used,
103 * because the iscsi passthrough and internal IO paths will never use high
104 * mem pages.
105 */
106static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
107{
108 struct scatterlist *sg;
109
110 if (segment->data != NULL || !segment->sg)
111 return;
112
113 sg = segment->sg;
114 BUG_ON(segment->sg_mapped);
115 BUG_ON(sg->length == 0);
116
117 /*
118 * If the page count is greater than one it is ok to send
119 * to the network layer's zero copy send path. If not we
120 * have to go the slow sendmsg path. We always map for the
121 * recv path.
122 */
123 if (page_count(sg_page(sg)) >= 1 && !recv)
124 return;
125
126 debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
127 segment);
128 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
129 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
130}
131
132void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
133{
134 debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
135
136 if (segment->sg_mapped) {
137 debug_tcp("iscsi_tcp_segment_unmap valid\n");
138 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
139 segment->sg_mapped = NULL;
140 segment->data = NULL;
141 }
142}
143EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
144
145/*
146 * Splice the digest buffer into the buffer
147 */
148static inline void
149iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
150{
151 segment->data = digest;
152 segment->digest_len = ISCSI_DIGEST_SIZE;
153 segment->total_size += ISCSI_DIGEST_SIZE;
154 segment->size = ISCSI_DIGEST_SIZE;
155 segment->copied = 0;
156 segment->sg = NULL;
157 segment->hash = NULL;
158}
159
160/**
161 * iscsi_tcp_segment_done - check whether the segment is complete
162 * @tcp_conn: iscsi tcp connection
163 * @segment: iscsi segment to check
 164 * @recv: set to one if this is called from the recv path
165 * @copied: number of bytes copied
166 *
167 * Check if we're done receiving this segment. If the receive
168 * buffer is full but we expect more data, move on to the
169 * next entry in the scatterlist.
170 *
171 * If the amount of data we received isn't a multiple of 4,
172 * we will transparently receive the pad bytes, too.
173 *
174 * This function must be re-entrant.
175 */
176int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
177 struct iscsi_segment *segment, int recv,
178 unsigned copied)
179{
180 static unsigned char padbuf[ISCSI_PAD_LEN];
181 struct scatterlist sg;
182 unsigned int pad;
183
184 debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
185 segment->size, recv ? "recv" : "xmit");
186 if (segment->hash && copied) {
187 /*
 188 * If a segment is kmapped we must unmap it before sending
189 * to the crypto layer since that will try to kmap it again.
190 */
191 iscsi_tcp_segment_unmap(segment);
192
193 if (!segment->data) {
194 sg_init_table(&sg, 1);
195 sg_set_page(&sg, sg_page(segment->sg), copied,
196 segment->copied + segment->sg_offset +
197 segment->sg->offset);
198 } else
199 sg_init_one(&sg, segment->data + segment->copied,
200 copied);
201 crypto_hash_update(segment->hash, &sg, copied);
202 }
203
204 segment->copied += copied;
205 if (segment->copied < segment->size) {
206 iscsi_tcp_segment_map(segment, recv);
207 return 0;
208 }
209
210 segment->total_copied += segment->copied;
211 segment->copied = 0;
212 segment->size = 0;
213
214 /* Unmap the current scatterlist page, if there is one. */
215 iscsi_tcp_segment_unmap(segment);
216
217 /* Do we have more scatterlist entries? */
218 debug_tcp("total copied %u total size %u\n", segment->total_copied,
219 segment->total_size);
220 if (segment->total_copied < segment->total_size) {
221 /* Proceed to the next entry in the scatterlist. */
222 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
223 0);
224 iscsi_tcp_segment_map(segment, recv);
225 BUG_ON(segment->size == 0);
226 return 0;
227 }
228
229 /* Do we need to handle padding? */
230 if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
231 pad = iscsi_padding(segment->total_copied);
232 if (pad != 0) {
233 debug_tcp("consume %d pad bytes\n", pad);
234 segment->total_size += pad;
235 segment->size = pad;
236 segment->data = padbuf;
237 return 0;
238 }
239 }
240
241 /*
242 * Set us up for transferring the data digest. hdr digest
243 * is completely handled in hdr done function.
244 */
245 if (segment->hash) {
246 crypto_hash_final(segment->hash, segment->digest);
247 iscsi_tcp_segment_splice_digest(segment,
248 recv ? segment->recv_digest : segment->digest);
249 return 0;
250 }
251
252 return 1;
253}
254EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
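
/*
 * Illustrative note (sketch): the iscsi_padding() helper used above
 * rounds the running total up to the next 4-byte boundary, so the
 * number of pad bytes transparently consumed is, e.g.:
 *
 *	pad = iscsi_padding(13);	-> 3 pad bytes
 *	pad = iscsi_padding(16);	-> 0 pad bytes
 */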
255
256/**
257 * iscsi_tcp_segment_recv - copy data to segment
258 * @tcp_conn: the iSCSI TCP connection
259 * @segment: the buffer to copy to
260 * @ptr: data pointer
261 * @len: amount of data available
262 *
263 * This function copies up to @len bytes to the
264 * given buffer, and returns the number of bytes
265 * consumed, which can actually be less than @len.
266 *
267 * If hash digest is enabled, the function will update the
268 * hash while copying.
269 * Combining these two operations doesn't buy us a lot (yet),
270 * but in the future we could implement combined copy+crc,
 271 * just the way we do for network layer checksums.
272 */
273static int
274iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
275 struct iscsi_segment *segment, const void *ptr,
276 unsigned int len)
277{
278 unsigned int copy = 0, copied = 0;
279
280 while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
281 if (copied == len) {
282 debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
283 len);
284 break;
285 }
286
287 copy = min(len - copied, segment->size - segment->copied);
288 debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
289 memcpy(segment->data + segment->copied, ptr + copied, copy);
290 copied += copy;
291 }
292 return copied;
293}
294
295inline void
296iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
297 unsigned char digest[ISCSI_DIGEST_SIZE])
298{
299 struct scatterlist sg;
300
301 sg_init_one(&sg, hdr, hdrlen);
302 crypto_hash_digest(hash, &sg, hdrlen, digest);
303}
304EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
305
306static inline int
307iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
308 struct iscsi_segment *segment)
309{
310 if (!segment->digest_len)
311 return 1;
312
313 if (memcmp(segment->recv_digest, segment->digest,
314 segment->digest_len)) {
315 debug_scsi("digest mismatch\n");
316 return 0;
317 }
318
319 return 1;
320}
321
322/*
323 * Helper function to set up segment buffer
324 */
325static inline void
326__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
327 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
328{
329 memset(segment, 0, sizeof(*segment));
330 segment->total_size = size;
331 segment->done = done;
332
333 if (hash) {
334 segment->hash = hash;
335 crypto_hash_init(hash);
336 }
337}
338
339inline void
340iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
341 size_t size, iscsi_segment_done_fn_t *done,
342 struct hash_desc *hash)
343{
344 __iscsi_segment_init(segment, size, done, hash);
345 segment->data = data;
346 segment->size = size;
347}
348EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
349
350inline int
351iscsi_segment_seek_sg(struct iscsi_segment *segment,
352 struct scatterlist *sg_list, unsigned int sg_count,
353 unsigned int offset, size_t size,
354 iscsi_segment_done_fn_t *done, struct hash_desc *hash)
355{
356 struct scatterlist *sg;
357 unsigned int i;
358
 359	debug_scsi("iscsi_segment_seek_sg offset %u size %zu\n",
360 offset, size);
361 __iscsi_segment_init(segment, size, done, hash);
362 for_each_sg(sg_list, sg, sg_count, i) {
363 debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
364 sg->offset);
365 if (offset < sg->length) {
366 iscsi_tcp_segment_init_sg(segment, sg, offset);
367 return 0;
368 }
369 offset -= sg->length;
370 }
371
372 return ISCSI_ERR_DATA_OFFSET;
373}
374EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
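
/*
 * Illustrative usage sketch (assumed caller, mirroring the Data-In
 * path later in this file): position a segment at a byte offset
 * within a command's scatterlist before receiving the payload:
 *
 *	rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
 *				   sdb->table.sgl, sdb->table.nents,
 *				   data_offset, datalen,
 *				   done_fn, rx_hash);
 *	if (rc)
 *		return rc;	-> offset lies beyond the scatterlist
 */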
375
376/**
377 * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
378 * @tcp_conn: iscsi connection to prep for
379 *
380 * This function always passes NULL for the hash argument, because when this
381 * function is called we do not yet know the final size of the header and want
382 * to delay the digest processing until we know that.
383 */
384void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
385{
386 debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
387 tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
388 iscsi_segment_init_linear(&tcp_conn->in.segment,
389 tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
390 iscsi_tcp_hdr_recv_done, NULL);
391}
392EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
393
394/*
395 * Handle incoming reply to any other type of command
396 */
397static int
398iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
399 struct iscsi_segment *segment)
400{
401 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
402 int rc = 0;
403
404 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
405 return ISCSI_ERR_DATA_DGST;
406
407 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
408 conn->data, tcp_conn->in.datalen);
409 if (rc)
410 return rc;
411
412 iscsi_tcp_hdr_recv_prep(tcp_conn);
413 return 0;
414}
415
416static void
417iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
418{
419 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
420 struct hash_desc *rx_hash = NULL;
421
 422	if (conn->datadgst_en &&
423 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
424 rx_hash = tcp_conn->rx_hash;
425
426 iscsi_segment_init_linear(&tcp_conn->in.segment,
427 conn->data, tcp_conn->in.datalen,
428 iscsi_tcp_data_recv_done, rx_hash);
429}
430
431/**
432 * iscsi_tcp_cleanup_task - free tcp_task resources
433 * @task: iscsi task
434 *
435 * must be called with session lock
436 */
437void iscsi_tcp_cleanup_task(struct iscsi_task *task)
438{
439 struct iscsi_tcp_task *tcp_task = task->dd_data;
440 struct iscsi_r2t_info *r2t;
441
442 /* nothing to do for mgmt or pending tasks */
443 if (!task->sc || task->state == ISCSI_TASK_PENDING)
444 return;
445
446 /* flush task's r2t queues */
447 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
448 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
449 sizeof(void*));
450 debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
451 }
452
453 r2t = tcp_task->r2t;
454 if (r2t != NULL) {
455 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
456 sizeof(void*));
457 tcp_task->r2t = NULL;
458 }
459}
460EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
461
462/**
463 * iscsi_tcp_data_in - SCSI Data-In Response processing
464 * @conn: iscsi connection
465 * @task: scsi command task
466 */
467static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
468{
469 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
470 struct iscsi_tcp_task *tcp_task = task->dd_data;
471 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
472 int datasn = be32_to_cpu(rhdr->datasn);
473 unsigned total_in_length = scsi_in(task->sc)->length;
474
475 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
476 if (tcp_conn->in.datalen == 0)
477 return 0;
478
479 if (tcp_task->exp_datasn != datasn) {
480 debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
481 __func__, tcp_task->exp_datasn, datasn);
482 return ISCSI_ERR_DATASN;
483 }
484
485 tcp_task->exp_datasn++;
486
487 tcp_task->data_offset = be32_to_cpu(rhdr->offset);
488 if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
489 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
490 __func__, tcp_task->data_offset,
491 tcp_conn->in.datalen, total_in_length);
492 return ISCSI_ERR_DATA_OFFSET;
493 }
494
495 conn->datain_pdus_cnt++;
496 return 0;
497}
498
499/**
500 * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
501 * @conn: iscsi connection
502 * @task: scsi command task
503 */
504static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
505{
506 struct iscsi_session *session = conn->session;
507 struct iscsi_tcp_task *tcp_task = task->dd_data;
508 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
509 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
510 struct iscsi_r2t_info *r2t;
511 int r2tsn = be32_to_cpu(rhdr->r2tsn);
512 int rc;
513
514 if (tcp_conn->in.datalen) {
515 iscsi_conn_printk(KERN_ERR, conn,
 516				  "invalid R2T with datalen %d\n",
517 tcp_conn->in.datalen);
518 return ISCSI_ERR_DATALEN;
519 }
520
521 if (tcp_task->exp_datasn != r2tsn){
522 debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
523 __func__, tcp_task->exp_datasn, r2tsn);
524 return ISCSI_ERR_R2TSN;
525 }
526
527 /* fill-in new R2T associated with the task */
528 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
529
530 if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
531 iscsi_conn_printk(KERN_INFO, conn,
532 "dropping R2T itt %d in recovery.\n",
533 task->itt);
534 return 0;
535 }
536
537 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
538 if (!rc) {
539 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
540 "Target has sent more R2Ts than it "
 541				  "negotiated for or the driver has leaked.\n");
542 return ISCSI_ERR_PROTO;
543 }
544
545 r2t->exp_statsn = rhdr->statsn;
546 r2t->data_length = be32_to_cpu(rhdr->data_length);
547 if (r2t->data_length == 0) {
548 iscsi_conn_printk(KERN_ERR, conn,
549 "invalid R2T with zero data len\n");
550 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
551 sizeof(void*));
552 return ISCSI_ERR_DATALEN;
553 }
554
555 if (r2t->data_length > session->max_burst)
 556		debug_scsi("invalid R2T with data len %u and max burst %u. "
557 "Attempting to execute request.\n",
558 r2t->data_length, session->max_burst);
559
560 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
561 if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
562 iscsi_conn_printk(KERN_ERR, conn,
563 "invalid R2T with data len %u at offset %u "
564 "and total length %d\n", r2t->data_length,
565 r2t->data_offset, scsi_out(task->sc)->length);
566 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
567 sizeof(void*));
568 return ISCSI_ERR_DATALEN;
569 }
570
571 r2t->ttt = rhdr->ttt; /* no flip */
572 r2t->datasn = 0;
573 r2t->sent = 0;
574
575 tcp_task->exp_datasn = r2tsn + 1;
576 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
577 conn->r2t_pdus_cnt++;
578
579 iscsi_requeue_task(task);
580 return 0;
581}
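/*
 * The R2T bookkeeping above follows a pool/queue pattern, roughly:
 *
 *   r2tpool.queue --__kfifo_get--> r2t, filled in from the R2T PDU
 *   r2t --__kfifo_put--> r2tqueue, waiting for the xmit side
 *   iscsi_tcp_get_curr_r2t() pulls it off r2tqueue to drive Data-Outs
 *   once r2t->sent == r2t->data_length, it goes back to r2tpool.queue
 */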
582
583/*
584 * Handle incoming reply to DataIn command
585 */
586static int
587iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
588 struct iscsi_segment *segment)
589{
590 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
591 struct iscsi_hdr *hdr = tcp_conn->in.hdr;
592 int rc;
593
594 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
595 return ISCSI_ERR_DATA_DGST;
596
597 /* check for non-exceptional status */
598 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
599 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
600 if (rc)
601 return rc;
602 }
603
604 iscsi_tcp_hdr_recv_prep(tcp_conn);
605 return 0;
606}
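/*
 * ISCSI_FLAG_DATA_STATUS means the target piggybacked the SCSI status
 * on the final Data-In PDU instead of sending a separate SCSI Response
 * PDU, so the command is completed here once the payload and its
 * digest (if any) check out.
 */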
607
608/**
609 * iscsi_tcp_hdr_dissect - process PDU header
610 * @conn: iSCSI connection
611 * @hdr: PDU header
612 *
613 * This function analyzes the header of the PDU received,
614 * and performs several sanity checks. If the PDU is accompanied
615 * by data, the receive buffer is set up to copy the incoming data
616 * to the correct location.
617 */
618static int
619iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
620{
621 int rc = 0, opcode, ahslen;
622 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
623 struct iscsi_task *task;
624
625 /* verify PDU length */
626 tcp_conn->in.datalen = ntoh24(hdr->dlength);
627 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
628 iscsi_conn_printk(KERN_ERR, conn,
629 "iscsi_tcp: datalen %d > %d\n",
630 tcp_conn->in.datalen, conn->max_recv_dlength);
631 return ISCSI_ERR_DATALEN;
632 }
633
634 /* Additional header segments. So far, we don't
635 * process additional headers.
636 */
637 ahslen = hdr->hlength << 2;
638
639 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
640 /* verify itt (itt encoding: age+cid+itt) */
641 rc = iscsi_verify_itt(conn, hdr->itt);
642 if (rc)
643 return rc;
644
645 debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
646 opcode, ahslen, tcp_conn->in.datalen);
647
648 switch (opcode) {
649 case ISCSI_OP_SCSI_DATA_IN:
650 spin_lock(&conn->session->lock);
651 task = iscsi_itt_to_ctask(conn, hdr->itt);
652 if (!task)
653 rc = ISCSI_ERR_BAD_ITT;
654 else
655 rc = iscsi_tcp_data_in(conn, task);
656 if (rc) {
657 spin_unlock(&conn->session->lock);
658 break;
659 }
660
661 if (tcp_conn->in.datalen) {
662 struct iscsi_tcp_task *tcp_task = task->dd_data;
663 struct hash_desc *rx_hash = NULL;
664 struct scsi_data_buffer *sdb = scsi_in(task->sc);
665
666 /*
667 * Setup copy of Data-In into the Scsi_Cmnd
668 * Scatterlist case:
669 * We set up the iscsi_segment to point to the next
670 * scatterlist entry to copy to. As we go along,
671 * we move on to the next scatterlist entry and
672 * update the digest per-entry.
673 */
674 if (conn->datadgst_en &&
675 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
676 rx_hash = tcp_conn->rx_hash;
677
678 debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
679 "datalen=%d)\n", tcp_conn,
680 tcp_task->data_offset,
681 tcp_conn->in.datalen);
682 rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
683 sdb->table.sgl,
684 sdb->table.nents,
685 tcp_task->data_offset,
686 tcp_conn->in.datalen,
687 iscsi_tcp_process_data_in,
688 rx_hash);
689 spin_unlock(&conn->session->lock);
690 return rc;
691 }
692 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
693 spin_unlock(&conn->session->lock);
694 break;
695 case ISCSI_OP_SCSI_CMD_RSP:
696 if (tcp_conn->in.datalen) {
697 iscsi_tcp_data_recv_prep(tcp_conn);
698 return 0;
699 }
700 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
701 break;
702 case ISCSI_OP_R2T:
703 spin_lock(&conn->session->lock);
704 task = iscsi_itt_to_ctask(conn, hdr->itt);
705 if (!task)
706 rc = ISCSI_ERR_BAD_ITT;
707 else if (ahslen)
708 rc = ISCSI_ERR_AHSLEN;
709 else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
710 rc = iscsi_tcp_r2t_rsp(conn, task);
711 else
712 rc = ISCSI_ERR_PROTO;
713 spin_unlock(&conn->session->lock);
714 break;
715 case ISCSI_OP_LOGIN_RSP:
716 case ISCSI_OP_TEXT_RSP:
717 case ISCSI_OP_REJECT:
718 case ISCSI_OP_ASYNC_EVENT:
719 /*
720 * It is possible that we could get a PDU with a buffer larger
721 * than 8K, but there are no targets that currently do this.
722 * For now we fail until we find a vendor that needs it.
723 */
724 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
725 iscsi_conn_printk(KERN_ERR, conn,
726 "iscsi_tcp: received buffer of "
727 "len %u but conn buffer is only %u "
728 "(opcode 0x%x)\n",
729 tcp_conn->in.datalen,
730 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
731 rc = ISCSI_ERR_PROTO;
732 break;
733 }
734
735 /* If there's data coming in with the response,
736 * receive it to the connection's buffer.
737 */
738 if (tcp_conn->in.datalen) {
739 iscsi_tcp_data_recv_prep(tcp_conn);
740 return 0;
741 }
742 /* fall through */
743 case ISCSI_OP_LOGOUT_RSP:
744 case ISCSI_OP_NOOP_IN:
745 case ISCSI_OP_SCSI_TMFUNC_RSP:
746 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
747 break;
748 default:
749 rc = ISCSI_ERR_BAD_OPCODE;
750 break;
751 }
752
753 if (rc == 0) {
754 /* Anything that comes with data should have
755 * been handled above. */
756 if (tcp_conn->in.datalen)
757 return ISCSI_ERR_PROTO;
758 iscsi_tcp_hdr_recv_prep(tcp_conn);
759 }
760
761 return rc;
762}
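/*
 * For reference, the total on-wire PDU size implied by the BHS fields
 * dissected above works out as below. This is an illustrative sketch
 * only (the example_* name is invented here): it assumes the standard
 * 4-byte CRC32C digests and 4-byte data padding from RFC 3720, and the
 * receive path never actually computes the size in one go like this.
 */
static inline unsigned int example_pdu_size(struct iscsi_hdr *hdr,
					    bool hdr_dgst, bool data_dgst)
{
	unsigned int ahslen = hdr->hlength << 2;	/* AHS, 4-byte units */
	unsigned int dlen = ntoh24(hdr->dlength);	/* data segment */
	unsigned int pad = (4 - (dlen & 3)) & 3;	/* pad data to 4 bytes */

	return sizeof(*hdr) + ahslen + (hdr_dgst ? 4 : 0) +
	       dlen + pad + ((dlen && data_dgst) ? 4 : 0);
}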
763
764/**
765 * iscsi_tcp_hdr_recv_done - process PDU header
766 * @tcp_conn: iscsi tcp connection
767 * @segment: header segment just received
768 * Callback invoked when the PDU header has been received; if it is
769 * followed by additional header segments, we go back for more data.
770 */
771static int
772iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
773 struct iscsi_segment *segment)
774{
775 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
776 struct iscsi_hdr *hdr;
777
778 /* Check if there are additional header segments
779 * *prior* to computing the digest, because we
780 * may need to go back to the caller for more.
781 */
782 hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
783 if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
784 /* Bump the header length - the caller will
785 * just loop around and get the AHS for us, and
786 * call again. */
787 unsigned int ahslen = hdr->hlength << 2;
788
789 /* Make sure we don't overflow */
790 if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
791 return ISCSI_ERR_AHSLEN;
792
793 segment->total_size += ahslen;
794 segment->size += ahslen;
795 return 0;
796 }
797
798 /* We're done processing the header. See if we're doing
799 * header digests; if so, set up the recv_digest buffer
800 * and go back for more. */
801 if (conn->hdrdgst_en &&
802 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
803 if (segment->digest_len == 0) {
804 /*
805 * Even if we offload the digest processing we
806 * splice it in so we can increment the skb/segment
807 * counters in preparation for the data segment.
808 */
809 iscsi_tcp_segment_splice_digest(segment,
810 segment->recv_digest);
811 return 0;
812 }
813
814 iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
815 segment->total_copied - ISCSI_DIGEST_SIZE,
816 segment->digest);
817
818 if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
819 return ISCSI_ERR_HDR_DGST;
820 }
821
822 tcp_conn->in.hdr = hdr;
823 return iscsi_tcp_hdr_dissect(conn, hdr);
824}
825
826/**
827 * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
828 * @tcp_conn: iscsi tcp conn
829 *
830 * Returns non-zero if we are currently processing, or set up to
831 * process, a header.
832 */
833inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
834{
835 return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
836}
837EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
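/*
 * The receive side is a small state machine keyed off segment->done:
 * iscsi_tcp_hdr_recv_done() runs when a header (plus AHS/digest)
 * segment completes, iscsi_tcp_process_data_in() when a Data-In
 * payload segment completes, and each done() callback preps the next
 * segment. The test above simply asks which state the connection
 * is in.
 */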
838
839/**
840 * iscsi_tcp_recv_skb - Process skb
841 * @conn: iscsi connection
842 * @skb: network buffer with header and/or data segment
843 * @offset: offset in skb
844 * @offloaded: bool indicating if transfer was offloaded
845 * @status: set to the transfer status on return
846 *
847 * Returns the number of bytes consumed.
848 */
849int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
850 unsigned int offset, bool offloaded, int *status)
851{
852 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
853 struct iscsi_segment *segment = &tcp_conn->in.segment;
854 struct skb_seq_state seq;
855 unsigned int consumed = 0;
856 int rc = 0;
857
858 debug_tcp("in %d bytes\n", skb->len - offset);
859
860 if (unlikely(conn->suspend_rx)) {
861 debug_tcp("conn %d Rx suspended!\n", conn->id);
862 *status = ISCSI_TCP_SUSPENDED;
863 return 0;
864 }
865
866 if (offloaded) {
867 segment->total_copied = segment->total_size;
868 goto segment_done;
869 }
870
871 skb_prepare_seq_read(skb, offset, skb->len, &seq);
872 while (1) {
873 unsigned int avail;
874 const u8 *ptr;
875
876 avail = skb_seq_read(consumed, &ptr, &seq);
877 if (avail == 0) {
878 debug_tcp("no more data avail. Consumed %d\n",
879 consumed);
880 *status = ISCSI_TCP_SKB_DONE;
881 skb_abort_seq_read(&seq);
882 goto skb_done;
883 }
884 BUG_ON(segment->copied >= segment->size);
885
886 debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
887 rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
888 BUG_ON(rc == 0);
889 consumed += rc;
890
891 if (segment->total_copied >= segment->total_size) {
892 skb_abort_seq_read(&seq);
893 goto segment_done;
894 }
895 }
896
897segment_done:
898 *status = ISCSI_TCP_SEGMENT_DONE;
899 debug_tcp("segment done\n");
900 rc = segment->done(tcp_conn, segment);
901 if (rc != 0) {
902 *status = ISCSI_TCP_CONN_ERR;
903 debug_tcp("Error receiving PDU, errno=%d\n", rc);
904 iscsi_conn_failure(conn, rc);
905 return 0;
906 }
907 /* The done() function sets up the next segment. */
908
909skb_done:
910 conn->rxdata_octets += consumed;
911 return consumed;
912}
913EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
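/*
 * For context, a software-TCP user of this helper typically feeds it
 * from a tcp_read_sock() recv_actor. A minimal sketch along those
 * lines (the example_* name is invented; suspend handling and locking
 * are the caller's problem and omitted here):
 */
static int example_recv_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed, total_consumed = 0;
	int status;

	while (total_consumed < len) {
		consumed = iscsi_tcp_recv_skb(conn, skb,
					      offset + total_consumed,
					      false, &status);
		if (!consumed)
			break;			/* suspended or conn error */
		total_consumed += consumed;
		if (status == ISCSI_TCP_SKB_DONE)
			break;			/* skb fully consumed */
	}
	return total_consumed;
}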
914
915/**
916 * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
917 * @task: iscsi task (SCSI command or mgmt task)
920 */
921int iscsi_tcp_task_init(struct iscsi_task *task)
922{
923 struct iscsi_tcp_task *tcp_task = task->dd_data;
924 struct iscsi_conn *conn = task->conn;
925 struct scsi_cmnd *sc = task->sc;
926 int err;
927
928 if (!sc) {
929 /*
930 * mgmt tasks do not have a scatterlist since they come
931 * in from the iscsi interface.
932 */
933 debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
934 task->itt);
935
936 return conn->session->tt->init_pdu(task, 0, task->data_count);
937 }
938
939 BUG_ON(__kfifo_len(tcp_task->r2tqueue));
940 tcp_task->exp_datasn = 0;
941
942 /* Prepare PDU, optionally w/ immediate data */
943 debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
944 conn->id, task->itt, task->imm_count,
945 task->unsol_r2t.data_length);
946
947 err = conn->session->tt->init_pdu(task, 0, task->imm_count);
948 if (err)
949 return err;
950 task->imm_count = 0;
951 return 0;
952}
953EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
954
955static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
956{
957 struct iscsi_session *session = task->conn->session;
958 struct iscsi_tcp_task *tcp_task = task->dd_data;
959 struct iscsi_r2t_info *r2t = NULL;
960
961 if (iscsi_task_has_unsol_data(task))
962 r2t = &task->unsol_r2t;
963 else {
964 spin_lock_bh(&session->lock);
965 if (tcp_task->r2t) {
966 r2t = tcp_task->r2t;
967 /* Continue with this R2T? */
968 if (r2t->data_length <= r2t->sent) {
969 debug_scsi(" done with r2t %p\n", r2t);
970 __kfifo_put(tcp_task->r2tpool.queue,
971 (void *)&tcp_task->r2t,
972 sizeof(void *));
973 tcp_task->r2t = r2t = NULL;
974 }
975 }
976
977 if (r2t == NULL) {
978 __kfifo_get(tcp_task->r2tqueue,
979 (void *)&tcp_task->r2t, sizeof(void *));
980 r2t = tcp_task->r2t;
981 }
982 spin_unlock_bh(&session->lock);
983 }
984
985 return r2t;
986}
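/*
 * Selection order above: unsolicited data (bounded by the negotiated
 * FirstBurstLength) is drained first, then the R2T currently being
 * serviced; only when that one is fully sent is the next R2T pulled
 * off r2tqueue, with the session lock covering the r2t/kfifo updates.
 */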
987
988/**
989 * iscsi_tcp_task_xmit - xmit normal PDU task
990 * @task: iscsi command task
991 *
992 * We're expected to return 0 when everything was transmitted successfully,
993 * -EAGAIN if there's still data in the queue, or any other non-zero value
994 * for other kinds of errors.
995 */
996int iscsi_tcp_task_xmit(struct iscsi_task *task)
997{
998 struct iscsi_conn *conn = task->conn;
999 struct iscsi_session *session = conn->session;
1000 struct iscsi_r2t_info *r2t;
1001 int rc = 0;
1002
1003flush:
1004 /* Flush any pending data first. */
1005 rc = session->tt->xmit_pdu(task);
1006 if (rc < 0)
1007 return rc;
1008
1009 /* mgmt command */
1010 if (!task->sc) {
1011 if (task->hdr->itt == RESERVED_ITT)
1012 iscsi_put_task(task);
1013 return 0;
1014 }
1015
1016 /* Are we done already? */
1017 if (task->sc->sc_data_direction != DMA_TO_DEVICE)
1018 return 0;
1019
1020 r2t = iscsi_tcp_get_curr_r2t(task);
1021 if (r2t == NULL) {
1022 /* Waiting for more R2Ts to arrive. */
1023 debug_tcp("no R2Ts yet\n");
1024 return 0;
1025 }
1026
1027 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
1028 if (rc)
1029 return rc;
1030 iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
1031
1032 debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
1033 r2t, r2t->datasn - 1, task->hdr->itt,
1034 r2t->data_offset + r2t->sent, r2t->data_count);
1035
1036 rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
1037 r2t->data_count);
1038 if (rc)
1039 return rc;
1040 r2t->sent += r2t->data_count;
1041 goto flush;
1042}
1043EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
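/*
 * Each trip around the flush loop above sends one solicited burst:
 * alloc_pdu() builds a Data-Out header, iscsi_prep_data_out_pdu()
 * fills it in from the current R2T, init_pdu() maps data_count bytes
 * starting at data_offset + sent, and xmit_pdu() pushes it out. A
 * negative return (including -EAGAIN from a congested transport) is
 * handed back to libiscsi so the task can be retried later.
 */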
1044
1045struct iscsi_cls_conn *
1046iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
1047 uint32_t conn_idx)
1048{
1050 struct iscsi_conn *conn;
1051 struct iscsi_cls_conn *cls_conn;
1052 struct iscsi_tcp_conn *tcp_conn;
1053
1054 cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
1055 if (!cls_conn)
1056 return NULL;
1057 conn = cls_conn->dd_data;
1058 /*
1059 * due to strange issues with iSER these are not set
1060 * in iscsi_conn_setup()
1061 */
1062 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1063
1064 tcp_conn = conn->dd_data;
1065 tcp_conn->iscsi_conn = conn;
1066
1067 tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
1068 if (!tcp_conn->dd_data) {
1069 iscsi_conn_teardown(cls_conn);
1070 return NULL;
1071 }
1072 return cls_conn;
1073}
1074EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
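/*
 * Usage sketch (hypothetical LLD; the example_* names are invented):
 * per-connection private data is chained iscsi_conn -> iscsi_tcp_conn
 * -> dd_data, so a driver layered on this library only has to pass the
 * size of its own per-connection struct:
 */
struct example_tcp_conn {
	struct socket *sock;	/* whatever the LLD tracks per connection */
};

static struct iscsi_cls_conn *
example_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	return iscsi_tcp_conn_setup(cls_session,
				    sizeof(struct example_tcp_conn), cid);
}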
1075
1076void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
1077{
1078 struct iscsi_conn *conn = cls_conn->dd_data;
1079 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1080
1081 kfree(tcp_conn->dd_data);
1082 iscsi_conn_teardown(cls_conn);
1083}
1084EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
1085
1086int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1087{
1088 int i;
1089 int cmd_i;
1090
1091 /*
1092 * initialize per-task: R2T pool and xmit queue
1093 */
1094 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1095 struct iscsi_task *task = session->cmds[cmd_i];
1096 struct iscsi_tcp_task *tcp_task = task->dd_data;
1097
1098 /*
1099 * pre-allocate twice as many R2Ts as negotiated, to handle the
1100 * race where the target ACKs DataOut faster than data_xmit()
1101 * can replenish the r2tqueue.
1102 */
1103
1104 /* R2T pool */
1105 if (iscsi_pool_init(&tcp_task->r2tpool,
1106 session->max_r2t * 2, NULL,
1107 sizeof(struct iscsi_r2t_info))) {
1108 goto r2t_alloc_fail;
1109 }
1110
1111 /* R2T xmit queue */
1112 tcp_task->r2tqueue = kfifo_alloc(
1113 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
1114 if (IS_ERR(tcp_task->r2tqueue)) {
1115 iscsi_pool_free(&tcp_task->r2tpool);
1116 goto r2t_alloc_fail;
1117 }
1118 }
1119
1120 return 0;
1121
1122r2t_alloc_fail:
1123 for (i = 0; i < cmd_i; i++) {
1124 struct iscsi_task *task = session->cmds[i];
1125 struct iscsi_tcp_task *tcp_task = task->dd_data;
1126
1127 kfifo_free(tcp_task->r2tqueue);
1128 iscsi_pool_free(&tcp_task->r2tpool);
1129 }
1130 return -ENOMEM;
1131}
1132EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
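/*
 * Sizing note: the pool holds max_r2t * 2 iscsi_r2t_info entries (see
 * the race comment above) and the kfifo gets room for max_r2t * 4
 * queued pointers. Only the unlocked __kfifo_get/__kfifo_put variants
 * are used on these fifos; callers serialize with session->lock, which
 * is why kfifo_alloc() is passed a NULL spinlock.
 */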
1133
1134void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1135{
1136 int i;
1137
1138 for (i = 0; i < session->cmds_max; i++) {
1139 struct iscsi_task *task = session->cmds[i];
1140 struct iscsi_tcp_task *tcp_task = task->dd_data;
1141
1142 kfifo_free(tcp_task->r2tqueue);
1143 iscsi_pool_free(&tcp_task->r2tpool);
1144 }
1145}
1146EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
1147
1148void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1149 struct iscsi_stats *stats)
1150{
1151 struct iscsi_conn *conn = cls_conn->dd_data;
1152
1153 stats->txdata_octets = conn->txdata_octets;
1154 stats->rxdata_octets = conn->rxdata_octets;
1155 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1156 stats->dataout_pdus = conn->dataout_pdus_cnt;
1157 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1158 stats->datain_pdus = conn->datain_pdus_cnt;
1159 stats->r2t_pdus = conn->r2t_pdus_cnt;
1160 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1161 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1162}
1163EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 60a9e6e9384b..dcba267db711 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,8 +29,10 @@ struct lpfc_sli2_slim;
29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact 29#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
30 the NameServer before giving up. */ 30 the NameServer before giving up. */
31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 31#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
32#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 32#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
33#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ 33#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
34#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
35#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 36#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 37#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
36#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 38#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@@ -354,8 +356,6 @@ struct lpfc_vport {
354 uint8_t load_flag; 356 uint8_t load_flag;
355#define FC_LOADING 0x1 /* HBA in process of loading drvr */ 357#define FC_LOADING 0x1 /* HBA in process of loading drvr */
356#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ 358#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
357 char *vname; /* Application assigned name */
358
359 /* Vport Config Parameters */ 359 /* Vport Config Parameters */
360 uint32_t cfg_scan_down; 360 uint32_t cfg_scan_down;
361 uint32_t cfg_lun_queue_depth; 361 uint32_t cfg_lun_queue_depth;
@@ -376,7 +376,7 @@ struct lpfc_vport {
376 376
377 struct fc_vport *fc_vport; 377 struct fc_vport *fc_vport;
378 378
379#ifdef CONFIG_LPFC_DEBUG_FS 379#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
380 struct dentry *debug_disc_trc; 380 struct dentry *debug_disc_trc;
381 struct dentry *debug_nodelist; 381 struct dentry *debug_nodelist;
382 struct dentry *vport_debugfs_root; 382 struct dentry *vport_debugfs_root;
@@ -428,6 +428,7 @@ struct lpfc_hba {
428#define LPFC_SLI3_VPORT_TEARDOWN 0x04 428#define LPFC_SLI3_VPORT_TEARDOWN 0x04
429#define LPFC_SLI3_CRP_ENABLED 0x08 429#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 430#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20
431 uint32_t iocb_cmd_size; 432 uint32_t iocb_cmd_size;
432 uint32_t iocb_rsp_size; 433 uint32_t iocb_rsp_size;
433 434
@@ -501,12 +502,14 @@ struct lpfc_hba {
501 uint32_t cfg_poll_tmo; 502 uint32_t cfg_poll_tmo;
502 uint32_t cfg_use_msi; 503 uint32_t cfg_use_msi;
503 uint32_t cfg_sg_seg_cnt; 504 uint32_t cfg_sg_seg_cnt;
505 uint32_t cfg_prot_sg_seg_cnt;
504 uint32_t cfg_sg_dma_buf_size; 506 uint32_t cfg_sg_dma_buf_size;
505 uint64_t cfg_soft_wwnn; 507 uint64_t cfg_soft_wwnn;
506 uint64_t cfg_soft_wwpn; 508 uint64_t cfg_soft_wwpn;
507 uint32_t cfg_hba_queue_depth; 509 uint32_t cfg_hba_queue_depth;
508 uint32_t cfg_enable_hba_reset; 510 uint32_t cfg_enable_hba_reset;
509 uint32_t cfg_enable_hba_heartbeat; 511 uint32_t cfg_enable_hba_heartbeat;
512 uint32_t cfg_enable_bg;
510 513
511 lpfc_vpd_t vpd; /* vital product data */ 514 lpfc_vpd_t vpd; /* vital product data */
512 515
@@ -572,6 +575,9 @@ struct lpfc_hba {
572 uint64_t fc4InputRequests; 575 uint64_t fc4InputRequests;
573 uint64_t fc4OutputRequests; 576 uint64_t fc4OutputRequests;
574 uint64_t fc4ControlRequests; 577 uint64_t fc4ControlRequests;
578 uint64_t bg_guard_err_cnt;
579 uint64_t bg_apptag_err_cnt;
580 uint64_t bg_reftag_err_cnt;
575 581
576 struct lpfc_sysfs_mbox sysfs_mbox; 582 struct lpfc_sysfs_mbox sysfs_mbox;
577 583
@@ -594,6 +600,8 @@ struct lpfc_hba {
594 600
595 struct fc_host_statistics link_stats; 601 struct fc_host_statistics link_stats;
596 enum intr_type_t intr_type; 602 enum intr_type_t intr_type;
603 uint32_t intr_mode;
604#define LPFC_INTR_ERROR 0xFFFFFFFF
597 struct msix_entry msix_entries[LPFC_MSIX_VECTORS]; 605 struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
598 606
599 struct list_head port_list; 607 struct list_head port_list;
@@ -613,12 +621,14 @@ struct lpfc_hba {
613 unsigned long last_rsrc_error_time; 621 unsigned long last_rsrc_error_time;
614 unsigned long last_ramp_down_time; 622 unsigned long last_ramp_down_time;
615 unsigned long last_ramp_up_time; 623 unsigned long last_ramp_up_time;
616#ifdef CONFIG_LPFC_DEBUG_FS 624#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
617 struct dentry *hba_debugfs_root; 625 struct dentry *hba_debugfs_root;
618 atomic_t debugfs_vport_count; 626 atomic_t debugfs_vport_count;
619 struct dentry *debug_hbqinfo; 627 struct dentry *debug_hbqinfo;
620 struct dentry *debug_dumpHostSlim; 628 struct dentry *debug_dumpHostSlim;
621 struct dentry *debug_dumpHBASlim; 629 struct dentry *debug_dumpHBASlim;
630 struct dentry *debug_dumpData; /* BlockGuard BPL*/
631 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
622 struct dentry *debug_slow_ring_trc; 632 struct dentry *debug_slow_ring_trc;
623 struct lpfc_debugfs_trc *slow_ring_trc; 633 struct lpfc_debugfs_trc *slow_ring_trc;
624 atomic_t slow_ring_trc_cnt; 634 atomic_t slow_ring_trc_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index aa3d6277581d..40cf0f4f327f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -96,6 +96,61 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
96 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 96 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
97} 97}
98 98
99static ssize_t
100lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
101 char *buf)
102{
103 struct Scsi_Host *shost = class_to_shost(dev);
104 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
105 struct lpfc_hba *phba = vport->phba;
106
107 if (phba->cfg_enable_bg)
108 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
109 return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
110 else
111 return snprintf(buf, PAGE_SIZE,
112 "BlockGuard Not Supported\n");
113 else
114 return snprintf(buf, PAGE_SIZE,
115 "BlockGuard Disabled\n");
116}
117
118static ssize_t
119lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
120 char *buf)
121{
122 struct Scsi_Host *shost = class_to_shost(dev);
123 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
124 struct lpfc_hba *phba = vport->phba;
125
126 return snprintf(buf, PAGE_SIZE, "%llu\n",
127 (unsigned long long)phba->bg_guard_err_cnt);
128}
129
130static ssize_t
131lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
132 char *buf)
133{
134 struct Scsi_Host *shost = class_to_shost(dev);
135 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
136 struct lpfc_hba *phba = vport->phba;
137
138 return snprintf(buf, PAGE_SIZE, "%llu\n",
139 (unsigned long long)phba->bg_apptag_err_cnt);
140}
141
142static ssize_t
143lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
144 char *buf)
145{
146 struct Scsi_Host *shost = class_to_shost(dev);
147 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
148 struct lpfc_hba *phba = vport->phba;
149
150 return snprintf(buf, PAGE_SIZE, "%llu\n",
151 (unsigned long long)phba->bg_reftag_err_cnt);
152}
153
99/** 154/**
100 * lpfc_info_show: Return some pci info about the host in ascii. 155 * lpfc_info_show: Return some pci info about the host in ascii.
101 * @dev: class converted to a Scsi_host structure. 156 * @dev: class converted to a Scsi_host structure.
@@ -1485,6 +1540,10 @@ lpfc_vport_param_store(name)\
1485static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ 1540static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
1486 lpfc_##name##_show, lpfc_##name##_store) 1541 lpfc_##name##_show, lpfc_##name##_store)
1487 1542
1543static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
1544static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
1545static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
1546static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
1488static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL); 1547static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
1489static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL); 1548static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
1490static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 1549static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@@ -1970,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
1970# LOG_LINK_EVENT 0x10 Link events 2029# LOG_LINK_EVENT 0x10 Link events
1971# LOG_FCP 0x40 FCP traffic history 2030# LOG_FCP 0x40 FCP traffic history
1972# LOG_NODE 0x80 Node table events 2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockGuard events
1973# LOG_MISC 0x400 Miscellaneous events 2033# LOG_MISC 0x400 Miscellaneous events
1974# LOG_SLI 0x800 SLI events 2034# LOG_SLI 0x800 SLI events
1975# LOG_FCP_ERROR 0x1000 Only log FCP errors 2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
@@ -2769,6 +2829,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
2769LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); 2829LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2770 2830
2771/* 2831/*
2832# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
2833# 0 = BlockGuard disabled (default)
2834# 1 = BlockGuard enabled
2835# Value range is [0,1]. Default value is 0.
2836*/
2837LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2838
2839
2840/*
2841# lpfc_prot_mask: i
2842# - Bit mask of host protection capabilities used to register with the
2843# SCSI mid-layer
2844# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
2845# - Allows you to ultimately specify which profiles to use
2846# - Default will result in registering capabilities for all profiles.
2847#
2848*/
2849unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
2850
2851module_param(lpfc_prot_mask, uint, 0);
2852MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
2853
2854/*
2855# lpfc_prot_guard: i
2856# - Bit mask of protection guard types to register with the SCSI mid-layer
2857# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC
2858# - Allows you to ultimately specify which profiles to use
2859# - Default will result in registering capabilities for all guard types
2860#
2861*/
2862unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
2863module_param(lpfc_prot_guard, byte, 0);
2864MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
2865
2866
2867/*
2772 * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count 2868 * lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
2773 * This value can be set to values between 64 and 256. The default value is 2869 * This value can be set to values between 64 and 256. The default value is
2774 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer 2870 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
@@ -2777,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2777LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, 2873LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
2778 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); 2874 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
2779 2875
2876LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
2877 LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
2878 "Max Protection Scatter Gather Segment Count");
2879
2780struct device_attribute *lpfc_hba_attrs[] = { 2880struct device_attribute *lpfc_hba_attrs[] = {
2881 &dev_attr_bg_info,
2882 &dev_attr_bg_guard_err,
2883 &dev_attr_bg_apptag_err,
2884 &dev_attr_bg_reftag_err,
2781 &dev_attr_info, 2885 &dev_attr_info,
2782 &dev_attr_serialnum, 2886 &dev_attr_serialnum,
2783 &dev_attr_modeldesc, 2887 &dev_attr_modeldesc,
@@ -2825,6 +2929,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2825 &dev_attr_lpfc_poll, 2929 &dev_attr_lpfc_poll,
2826 &dev_attr_lpfc_poll_tmo, 2930 &dev_attr_lpfc_poll_tmo,
2827 &dev_attr_lpfc_use_msi, 2931 &dev_attr_lpfc_use_msi,
2932 &dev_attr_lpfc_enable_bg,
2828 &dev_attr_lpfc_soft_wwnn, 2933 &dev_attr_lpfc_soft_wwnn,
2829 &dev_attr_lpfc_soft_wwpn, 2934 &dev_attr_lpfc_soft_wwpn,
2830 &dev_attr_lpfc_soft_wwn_enable, 2935 &dev_attr_lpfc_soft_wwn_enable,
@@ -2833,6 +2938,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2833 &dev_attr_lpfc_sg_seg_cnt, 2938 &dev_attr_lpfc_sg_seg_cnt,
2834 &dev_attr_lpfc_max_scsicmpl_time, 2939 &dev_attr_lpfc_max_scsicmpl_time,
2835 &dev_attr_lpfc_stat_data_ctrl, 2940 &dev_attr_lpfc_stat_data_ctrl,
2941 &dev_attr_lpfc_prot_sg_seg_cnt,
2836 NULL, 2942 NULL,
2837}; 2943};
2838 2944
@@ -3282,25 +3388,28 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
3282 int error; 3388 int error;
3283 3389
3284 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3390 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3285 &sysfs_ctlreg_attr); 3391 &sysfs_drvr_stat_data_attr);
3286 if (error) 3392
3393 /* Virtual ports do not need ctrl_reg and mbox */
3394 if (error || vport->port_type == LPFC_NPIV_PORT)
3287 goto out; 3395 goto out;
3288 3396
3289 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3397 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3290 &sysfs_mbox_attr); 3398 &sysfs_ctlreg_attr);
3291 if (error) 3399 if (error)
3292 goto out_remove_ctlreg_attr; 3400 goto out_remove_stat_attr;
3293 3401
3294 error = sysfs_create_bin_file(&shost->shost_dev.kobj, 3402 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
3295 &sysfs_drvr_stat_data_attr); 3403 &sysfs_mbox_attr);
3296 if (error) 3404 if (error)
3297 goto out_remove_mbox_attr; 3405 goto out_remove_ctlreg_attr;
3298 3406
3299 return 0; 3407 return 0;
3300out_remove_mbox_attr:
3301 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
3302out_remove_ctlreg_attr: 3408out_remove_ctlreg_attr:
3303 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3409 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
3410out_remove_stat_attr:
3411 sysfs_remove_bin_file(&shost->shost_dev.kobj,
3412 &sysfs_drvr_stat_data_attr);
3304out: 3413out:
3305 return error; 3414 return error;
3306} 3415}
@@ -3315,6 +3424,9 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
3315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3424 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3316 sysfs_remove_bin_file(&shost->shost_dev.kobj, 3425 sysfs_remove_bin_file(&shost->shost_dev.kobj,
3317 &sysfs_drvr_stat_data_attr); 3426 &sysfs_drvr_stat_data_attr);
3427 /* Virtual ports do not need ctrl_reg and mbox */
3428 if (vport->port_type == LPFC_NPIV_PORT)
3429 return;
3318 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 3430 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
3319 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3431 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
3320} 3432}
@@ -3792,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev, \
3792 lpfc_rport_show_function(field, format_string, sz, ) \ 3904 lpfc_rport_show_function(field, format_string, sz, ) \
3793static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) 3905static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
3794 3906
3907/**
3908 * lpfc_set_vport_symbolic_name: Set the vport's symbolic name.
3909 * @fc_vport: The fc_vport whose symbolic name has been changed.
3910 *
3911 * Description:
3912 * This function is called by the transport after the @fc_vport's symbolic name
3913 * has been changed. This function re-registers the symbolic name with the
3914 * switch to propagate the change into the fabric if the vport is active.
3915 **/
3916static void
3917lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3918{
3919 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
3920
3921 if (vport->port_state == LPFC_VPORT_READY)
3922 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3923}
3795 3924
3796struct fc_function_template lpfc_transport_functions = { 3925struct fc_function_template lpfc_transport_functions = {
3797 /* fixed attributes the driver supports */ 3926 /* fixed attributes the driver supports */
@@ -3801,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = {
3801 .show_host_supported_fc4s = 1, 3930 .show_host_supported_fc4s = 1,
3802 .show_host_supported_speeds = 1, 3931 .show_host_supported_speeds = 1,
3803 .show_host_maxframe_size = 1, 3932 .show_host_maxframe_size = 1,
3933 .show_host_symbolic_name = 1,
3804 3934
3805 /* dynamic attributes the driver supports */ 3935 /* dynamic attributes the driver supports */
3806 .get_host_port_id = lpfc_get_host_port_id, 3936 .get_host_port_id = lpfc_get_host_port_id,
@@ -3850,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = {
3850 .terminate_rport_io = lpfc_terminate_rport_io, 3980 .terminate_rport_io = lpfc_terminate_rport_io,
3851 3981
3852 .dd_fcvport_size = sizeof(struct lpfc_vport *), 3982 .dd_fcvport_size = sizeof(struct lpfc_vport *),
3983
3984 .vport_disable = lpfc_vport_disable,
3985
3986 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
3853}; 3987};
3854 3988
3855struct fc_function_template lpfc_vport_transport_functions = { 3989struct fc_function_template lpfc_vport_transport_functions = {
@@ -3860,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
3860 .show_host_supported_fc4s = 1, 3994 .show_host_supported_fc4s = 1,
3861 .show_host_supported_speeds = 1, 3995 .show_host_supported_speeds = 1,
3862 .show_host_maxframe_size = 1, 3996 .show_host_maxframe_size = 1,
3997 .show_host_symbolic_name = 1,
3863 3998
3864 /* dynamic attributes the driver supports */ 3999 /* dynamic attributes the driver supports */
3865 .get_host_port_id = lpfc_get_host_port_id, 4000 .get_host_port_id = lpfc_get_host_port_id,
@@ -3908,6 +4043,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
3908 .terminate_rport_io = lpfc_terminate_rport_io, 4043 .terminate_rport_io = lpfc_terminate_rport_io,
3909 4044
3910 .vport_disable = lpfc_vport_disable, 4045 .vport_disable = lpfc_vport_disable,
4046
4047 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
3911}; 4048};
3912 4049
3913/** 4050/**
@@ -3930,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
3930 lpfc_use_msi_init(phba, lpfc_use_msi); 4067 lpfc_use_msi_init(phba, lpfc_use_msi);
3931 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4068 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
3932 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4069 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4070 lpfc_enable_bg_init(phba, lpfc_enable_bg);
3933 phba->cfg_poll = lpfc_poll; 4071 phba->cfg_poll = lpfc_poll;
3934 phba->cfg_soft_wwnn = 0L; 4072 phba->cfg_soft_wwnn = 0L;
3935 phba->cfg_soft_wwpn = 0L; 4073 phba->cfg_soft_wwpn = 0L;
3936 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4074 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
3937 /* Also reinitialize the host templates with new values. */ 4075 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
3938 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3939 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3940 /* 4076 /*
3941 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4077 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3942 * used to create the sg_dma_buf_pool must be dynamically calculated. 4078 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -3945,6 +4081,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
3945 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 4081 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3946 sizeof(struct fcp_rsp) + 4082 sizeof(struct fcp_rsp) +
3947 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 4083 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4084
4085 if (phba->cfg_enable_bg) {
4086 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4087 phba->cfg_sg_dma_buf_size +=
4088 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4089 }
4090
4091 /* Also reinitialize the host templates with new values. */
4092 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4093 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4094
3948 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4095 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
3949 return; 4096 return;
3950} 4097}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 044ef4057d28..07f4976319a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -22,6 +22,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
22 22
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
27 28
@@ -284,12 +285,24 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
284 uint32_t, uint32_t); 285 uint32_t, uint32_t);
285extern struct lpfc_hbq_init *lpfc_hbq_defs[]; 286extern struct lpfc_hbq_init *lpfc_hbq_defs[];
286 287
288/* externs BlockGuard */
289extern char *_dump_buf_data;
290extern unsigned long _dump_buf_data_order;
291extern char *_dump_buf_dif;
292extern unsigned long _dump_buf_dif_order;
293extern spinlock_t _dump_buf_lock;
294extern int _dump_buf_done;
295extern spinlock_t pgcnt_lock;
296extern unsigned int pgcnt;
297extern unsigned int lpfc_prot_mask;
298extern unsigned char lpfc_prot_guard;
299
287/* Interface exported by fabric iocb scheduler */ 300/* Interface exported by fabric iocb scheduler */
288void lpfc_fabric_abort_nport(struct lpfc_nodelist *); 301void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
289void lpfc_fabric_abort_hba(struct lpfc_hba *); 302void lpfc_fabric_abort_hba(struct lpfc_hba *);
290void lpfc_fabric_block_timeout(unsigned long); 303void lpfc_fabric_block_timeout(unsigned long);
291void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); 304void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
292void lpfc_adjust_queue_depth(struct lpfc_hba *); 305void lpfc_rampdown_queue_depth(struct lpfc_hba *);
293void lpfc_ramp_down_queue_handler(struct lpfc_hba *); 306void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
294void lpfc_ramp_up_queue_handler(struct lpfc_hba *); 307void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
295void lpfc_scsi_dev_block(struct lpfc_hba *); 308void lpfc_scsi_dev_block(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 26dae8bae2d1..896c7b0351e5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
560 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); 560 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
561 561
562 /* Don't bother processing response if vport is being torn down. */ 562 /* Don't bother processing response if vport is being torn down. */
563 if (vport->load_flag & FC_UNLOADING) 563 if (vport->load_flag & FC_UNLOADING) {
564 if (vport->fc_flag & FC_RSCN_MODE)
565 lpfc_els_flush_rscn(vport);
564 goto out; 566 goto out;
567 }
565 568
566 if (lpfc_els_chk_latt(vport)) { 569 if (lpfc_els_chk_latt(vport)) {
567 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 570 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
568 "0216 Link event during NS query\n"); 571 "0216 Link event during NS query\n");
572 if (vport->fc_flag & FC_RSCN_MODE)
573 lpfc_els_flush_rscn(vport);
569 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 574 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
570 goto out; 575 goto out;
571 } 576 }
572 if (lpfc_error_lost_link(irsp)) { 577 if (lpfc_error_lost_link(irsp)) {
573 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 578 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
574 "0226 NS query failed due to link event\n"); 579 "0226 NS query failed due to link event\n");
580 if (vport->fc_flag & FC_RSCN_MODE)
581 lpfc_els_flush_rscn(vport);
575 goto out; 582 goto out;
576 } 583 }
577 if (irsp->ulpStatus) { 584 if (irsp->ulpStatus) {
@@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
587 if (rc == 0) 594 if (rc == 0)
588 goto out; 595 goto out;
589 } 596 }
597 if (vport->fc_flag & FC_RSCN_MODE)
598 lpfc_els_flush_rscn(vport);
590 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 599 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
591 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 600 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
592 "0257 GID_FT Query error: 0x%x 0x%x\n", 601 "0257 GID_FT Query error: 0x%x 0x%x\n",
@@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
1008 if (n < size) 1017 if (n < size)
1009 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi); 1018 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
1010 1019
1011 if (n < size && vport->vname) 1020 if (n < size &&
1012 n += snprintf(symbol + n, size - n, " VName-%s", vport->vname); 1021 strlen(vport->fc_vport->symbolic_name))
1022 n += snprintf(symbol + n, size - n, " VName-%s",
1023 vport->fc_vport->symbolic_name);
1013 return n; 1024 return n;
1014} 1025}
1015 1026
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 771920bdde44..b615eda361d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -46,7 +46,7 @@
46#include "lpfc_compat.h" 46#include "lpfc_compat.h"
47#include "lpfc_debugfs.h" 47#include "lpfc_debugfs.h"
48 48
49#ifdef CONFIG_LPFC_DEBUG_FS 49#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
50/** 50/**
51 * debugfs interface 51 * debugfs interface
52 * 52 *
@@ -618,7 +618,7 @@ inline void
618lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt, 618lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
619 uint32_t data1, uint32_t data2, uint32_t data3) 619 uint32_t data1, uint32_t data2, uint32_t data3)
620{ 620{
621#ifdef CONFIG_LPFC_DEBUG_FS 621#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
622 struct lpfc_debugfs_trc *dtp; 622 struct lpfc_debugfs_trc *dtp;
623 int index; 623 int index;
624 624
@@ -659,7 +659,7 @@ inline void
659lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, 659lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
660 uint32_t data1, uint32_t data2, uint32_t data3) 660 uint32_t data1, uint32_t data2, uint32_t data3)
661{ 661{
662#ifdef CONFIG_LPFC_DEBUG_FS 662#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
663 struct lpfc_debugfs_trc *dtp; 663 struct lpfc_debugfs_trc *dtp;
664 int index; 664 int index;
665 665
@@ -680,7 +680,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
680 return; 680 return;
681} 681}
682 682
683#ifdef CONFIG_LPFC_DEBUG_FS 683#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
684/** 684/**
685 * lpfc_debugfs_disc_trc_open - Open the discovery trace log. 685 * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
686 * @inode: The inode pointer that contains a vport pointer. 686 * @inode: The inode pointer that contains a vport pointer.
@@ -907,6 +907,91 @@ out:
907 return rc; 907 return rc;
908} 908}
909 909
910static int
911lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
912{
913 struct lpfc_debug *debug;
914 int rc = -ENOMEM;
915
916 if (!_dump_buf_data)
917 return -EBUSY;
918
919 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
920 if (!debug)
921 goto out;
922
923 /* Round to page boundary */
924 printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
925 __func__, _dump_buf_data);
926 debug->buffer = _dump_buf_data;
927 if (!debug->buffer) {
928 kfree(debug);
929 goto out;
930 }
931
932 debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
933 file->private_data = debug;
934
935 rc = 0;
936out:
937 return rc;
938}
939
940static int
941lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
942{
943 struct lpfc_debug *debug;
944 int rc = -ENOMEM;
945
946 if (!_dump_buf_dif)
947 return -EBUSY;
948
949 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
950 if (!debug)
951 goto out;
952
953 /* Round to page boundary */
954 printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
955 _dump_buf_dif, file->f_dentry->d_name.name);
956 debug->buffer = _dump_buf_dif;
957 if (!debug->buffer) {
958 kfree(debug);
959 goto out;
960 }
961
962 debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
963 file->private_data = debug;
964
965 rc = 0;
966out:
967 return rc;
968}
969
970static ssize_t
971lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
972 size_t nbytes, loff_t *ppos)
973{
974 /*
975 * The Data/DIF buffers only save one failing IO.
976 * The write op is used as a reset mechanism after an IO has
977 * already been saved, so that the next one can be saved.
978 */
979 spin_lock(&_dump_buf_lock);
980
981 memset((void *)_dump_buf_data, 0,
982 ((1 << PAGE_SHIFT) << _dump_buf_data_order));
983 memset((void *)_dump_buf_dif, 0,
984 ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
985
986 _dump_buf_done = 0;
987
988 spin_unlock(&_dump_buf_lock);
989
990 return nbytes;
991}
992
993
994
910/** 995/**
911 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file. 996 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
912 * @inode: The inode pointer that contains a vport pointer. 997 * @inode: The inode pointer that contains a vport pointer.
@@ -1035,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
1035 return 0; 1120 return 0;
1036} 1121}
1037 1122
1123static int
1124lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
1125{
1126 struct lpfc_debug *debug = file->private_data;
1127
1128 debug->buffer = NULL;
1129 kfree(debug);
1130
1131 return 0;
1132}
1133
1038#undef lpfc_debugfs_op_disc_trc 1134#undef lpfc_debugfs_op_disc_trc
1039static struct file_operations lpfc_debugfs_op_disc_trc = { 1135static struct file_operations lpfc_debugfs_op_disc_trc = {
1040 .owner = THIS_MODULE, 1136 .owner = THIS_MODULE,
@@ -1080,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
1080 .release = lpfc_debugfs_release, 1176 .release = lpfc_debugfs_release,
1081}; 1177};
1082 1178
1179#undef lpfc_debugfs_op_dumpData
1180static struct file_operations lpfc_debugfs_op_dumpData = {
1181 .owner = THIS_MODULE,
1182 .open = lpfc_debugfs_dumpData_open,
1183 .llseek = lpfc_debugfs_lseek,
1184 .read = lpfc_debugfs_read,
1185 .write = lpfc_debugfs_dumpDataDif_write,
1186 .release = lpfc_debugfs_dumpDataDif_release,
1187};
1188
1189#undef lpfc_debugfs_op_dumpDif
1190static struct file_operations lpfc_debugfs_op_dumpDif = {
1191 .owner = THIS_MODULE,
1192 .open = lpfc_debugfs_dumpDif_open,
1193 .llseek = lpfc_debugfs_lseek,
1194 .read = lpfc_debugfs_read,
1195 .write = lpfc_debugfs_dumpDataDif_write,
1196 .release = lpfc_debugfs_dumpDataDif_release,
1197};
1198
1083#undef lpfc_debugfs_op_slow_ring_trc 1199#undef lpfc_debugfs_op_slow_ring_trc
1084static struct file_operations lpfc_debugfs_op_slow_ring_trc = { 1200static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
1085 .owner = THIS_MODULE, 1201 .owner = THIS_MODULE,
@@ -1106,7 +1222,7 @@ static atomic_t lpfc_debugfs_hba_count;
1106inline void 1222inline void
1107lpfc_debugfs_initialize(struct lpfc_vport *vport) 1223lpfc_debugfs_initialize(struct lpfc_vport *vport)
1108{ 1224{
1109#ifdef CONFIG_LPFC_DEBUG_FS 1225#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1110 struct lpfc_hba *phba = vport->phba; 1226 struct lpfc_hba *phba = vport->phba;
1111 char name[64]; 1227 char name[64];
1112 uint32_t num, i; 1228 uint32_t num, i;
@@ -1176,6 +1292,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1176 goto debug_failed; 1292 goto debug_failed;
1177 } 1293 }
1178 1294
1295 /* Setup dumpData */
1296 snprintf(name, sizeof(name), "dumpData");
1297 phba->debug_dumpData =
1298 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
1299 phba->hba_debugfs_root,
1300 phba, &lpfc_debugfs_op_dumpData);
1301 if (!phba->debug_dumpData) {
1302 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1303 "0800 Cannot create debugfs dumpData\n");
1304 goto debug_failed;
1305 }
1306
1307 /* Setup dumpDif */
1308 snprintf(name, sizeof(name), "dumpDif");
1309 phba->debug_dumpDif =
1310 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
1311 phba->hba_debugfs_root,
1312 phba, &lpfc_debugfs_op_dumpDif);
1313 if (!phba->debug_dumpDif) {
1314 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1315 "0801 Cannot create debugfs dumpDif\n");
1316 goto debug_failed;
1317 }
1318
1319
1320
1179 /* Setup slow ring trace */ 1321 /* Setup slow ring trace */
1180 if (lpfc_debugfs_max_slow_ring_trc) { 1322 if (lpfc_debugfs_max_slow_ring_trc) {
1181 num = lpfc_debugfs_max_slow_ring_trc - 1; 1323 num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1305,7 +1447,7 @@ debug_failed:
1305inline void 1447inline void
1306lpfc_debugfs_terminate(struct lpfc_vport *vport) 1448lpfc_debugfs_terminate(struct lpfc_vport *vport)
1307{ 1449{
1308#ifdef CONFIG_LPFC_DEBUG_FS 1450#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1309 struct lpfc_hba *phba = vport->phba; 1451 struct lpfc_hba *phba = vport->phba;
1310 1452
1311 if (vport->disc_trc) { 1453 if (vport->disc_trc) {
@@ -1340,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
1340 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ 1482 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
1341 phba->debug_dumpHostSlim = NULL; 1483 phba->debug_dumpHostSlim = NULL;
1342 } 1484 }
1485 if (phba->debug_dumpData) {
1486 debugfs_remove(phba->debug_dumpData); /* dumpData */
1487 phba->debug_dumpData = NULL;
1488 }
1489
1490 if (phba->debug_dumpDif) {
1491 debugfs_remove(phba->debug_dumpDif); /* dumpDif */
1492 phba->debug_dumpDif = NULL;
1493 }
1494
1343 if (phba->slow_ring_trc) { 1495 if (phba->slow_ring_trc) {
1344 kfree(phba->slow_ring_trc); 1496 kfree(phba->slow_ring_trc);
1345 phba->slow_ring_trc = NULL; 1497 phba->slow_ring_trc = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 31e86a55391d..03c7313a1012 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -21,7 +21,7 @@
21#ifndef _H_LPFC_DEBUG_FS 21#ifndef _H_LPFC_DEBUG_FS
22#define _H_LPFC_DEBUG_FS 22#define _H_LPFC_DEBUG_FS
23 23
24#ifdef CONFIG_LPFC_DEBUG_FS 24#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
25struct lpfc_debugfs_trc { 25struct lpfc_debugfs_trc {
26 char *fmt; 26 char *fmt;
27 uint32_t data1; 27 uint32_t data1;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 630bd28fb997..a8f30bdaff69 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -221,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 221 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 222 icmd->ulpContext = vport->vpi;
223 icmd->ulpCt_h = 0; 223 icmd->ulpCt_h = 0;
224 icmd->ulpCt_l = 1; 224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO)
226 icmd->ulpCt_l = 0; /* context = invalid RPI */
227 else
228 icmd->ulpCt_l = 1; /* context = VPI */
225 } 229 }
226 230
227 bpl = (struct ulp_bde64 *) pbuflist->virt; 231 bpl = (struct ulp_bde64 *) pbuflist->virt;
@@ -271,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
271 return elsiocb; 275 return elsiocb;
272 276
273els_iocb_free_pbuf_exit: 277els_iocb_free_pbuf_exit:
274 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 278 if (expectRsp)
279 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
275 kfree(pbuflist); 280 kfree(pbuflist);
276 281
277els_iocb_free_prsp_exit: 282els_iocb_free_prsp_exit:
@@ -2468,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2468 case IOSTAT_LOCAL_REJECT: 2473 case IOSTAT_LOCAL_REJECT:
2469 switch ((irsp->un.ulpWord[4] & 0xff)) { 2474 switch ((irsp->un.ulpWord[4] & 0xff)) {
2470 case IOERR_LOOP_OPEN_FAILURE: 2475 case IOERR_LOOP_OPEN_FAILURE:
2476 if (cmd == ELS_CMD_FLOGI) {
2477 if (PCI_DEVICE_ID_HORNET ==
2478 phba->pcidev->device) {
2479 phba->fc_topology = TOPOLOGY_LOOP;
2480 phba->pport->fc_myDID = 0;
2481 phba->alpa_map[0] = 0;
2482 phba->alpa_map[1] = 0;
2483 }
2484 }
2471 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 2485 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2472 delay = 1000; 2486 delay = 1000;
2473 retry = 1; 2487 retry = 1;
@@ -3823,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3823 while (payload_len) { 3837 while (payload_len) {
3824 rscn_did.un.word = be32_to_cpu(*lp++); 3838 rscn_did.un.word = be32_to_cpu(*lp++);
3825 payload_len -= sizeof(uint32_t); 3839 payload_len -= sizeof(uint32_t);
3826 switch (rscn_did.un.b.resv) { 3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3827 case 0: /* Single N_Port ID effected */ 3841 case RSCN_ADDRESS_FORMAT_PORT:
3828 if (ns_did.un.word == rscn_did.un.word) 3842 if (ns_did.un.word == rscn_did.un.word)
3829 goto return_did_out; 3843 goto return_did_out;
3830 break; 3844 break;
3831 case 1: /* Whole N_Port Area effected */ 3845 case RSCN_ADDRESS_FORMAT_AREA:
3832 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 3846 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3833 && (ns_did.un.b.area == rscn_did.un.b.area)) 3847 && (ns_did.un.b.area == rscn_did.un.b.area))
3834 goto return_did_out; 3848 goto return_did_out;
3835 break; 3849 break;
3836 case 2: /* Whole N_Port Domain effected */ 3850 case RSCN_ADDRESS_FORMAT_DOMAIN:
3837 if (ns_did.un.b.domain == rscn_did.un.b.domain) 3851 if (ns_did.un.b.domain == rscn_did.un.b.domain)
3838 goto return_did_out; 3852 goto return_did_out;
3839 break; 3853 break;
3840 default: 3854 case RSCN_ADDRESS_FORMAT_FABRIC:
3841 /* Unknown Identifier in RSCN node */
3842 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
3843 "0217 Unknown Identifier in "
3844 "RSCN payload Data: x%x\n",
3845 rscn_did.un.word);
3846 case 3: /* Whole Fabric effected */
3847 goto return_did_out; 3855 goto return_did_out;
3848 } 3856 }
3849 } 3857 }
@@ -3887,6 +3895,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
3887} 3895}
3888 3896
3889/** 3897/**
3898 * lpfc_send_rscn_event: Send an RSCN event to management application.
3899 * @vport: pointer to a host virtual N_Port data structure.
3900 * @cmdiocb: pointer to lpfc command iocb data structure.
3901 *
3902 * lpfc_send_rscn_event sends an RSCN netlink event to management
3903 * applications.
3904 */
3905static void
3906lpfc_send_rscn_event(struct lpfc_vport *vport,
3907 struct lpfc_iocbq *cmdiocb)
3908{
3909 struct lpfc_dmabuf *pcmd;
3910 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3911 uint32_t *payload_ptr;
3912 uint32_t payload_len;
3913 struct lpfc_rscn_event_header *rscn_event_data;
3914
3915 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3916 payload_ptr = (uint32_t *) pcmd->virt;
3917 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
3918
3919 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
3920 payload_len, GFP_KERNEL);
3921 if (!rscn_event_data) {
3922 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3923 "0147 Failed to allocate memory for RSCN event\n");
3924 return;
3925 }
3926 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
3927 rscn_event_data->payload_length = payload_len;
3928 memcpy(rscn_event_data->rscn_payload, payload_ptr,
3929 payload_len);
3930
3931 fc_host_post_vendor_event(shost,
3932 fc_get_event_number(),
3933 sizeof(struct lpfc_els_event_header) + payload_len,
3934 (char *)rscn_event_data,
3935 LPFC_NL_VENDOR_ID);
3936
3937 kfree(rscn_event_data);
3938}
3939
3940/**
3890 * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. 3941 * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
3891 * @vport: pointer to a host virtual N_Port data structure. 3942 * @vport: pointer to a host virtual N_Port data structure.
3892 * @cmdiocb: pointer to lpfc command iocb data structure. 3943 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3933,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3933 "0214 RSCN received Data: x%x x%x x%x x%x\n", 3984 "0214 RSCN received Data: x%x x%x x%x x%x\n",
3934 vport->fc_flag, payload_len, *lp, 3985 vport->fc_flag, payload_len, *lp,
3935 vport->fc_rscn_id_cnt); 3986 vport->fc_rscn_id_cnt);
3987
3988 /* Send an RSCN event to the management application */
3989 lpfc_send_rscn_event(vport, cmdiocb);
3990
3936 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 3991 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
3937 fc_host_post_event(shost, fc_get_event_number(), 3992 fc_host_post_event(shost, fc_get_event_number(),
3938 FCH_EVT_RSCN, lp[i]); 3993 FCH_EVT_RSCN, lp[i]);
@@ -4884,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
4884 uint32_t timeout; 4939 uint32_t timeout;
4885 uint32_t remote_ID = 0xffffffff; 4940 uint32_t remote_ID = 0xffffffff;
4886 4941
4887 /* If the timer is already canceled do nothing */
4888 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
4889 return;
4890 }
4891 spin_lock_irq(&phba->hbalock); 4942 spin_lock_irq(&phba->hbalock);
4892 timeout = (uint32_t)(phba->fc_ratov << 1); 4943 timeout = (uint32_t)(phba->fc_ratov << 1);
4893 4944
@@ -5128,7 +5179,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5128 fc_get_event_number(), 5179 fc_get_event_number(),
5129 sizeof(lsrjt_event), 5180 sizeof(lsrjt_event),
5130 (char *)&lsrjt_event, 5181 (char *)&lsrjt_event,
5131 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5182 LPFC_NL_VENDOR_ID);
5132 return; 5183 return;
5133 } 5184 }
5134 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 5185 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
@@ -5146,7 +5197,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5146 fc_get_event_number(), 5197 fc_get_event_number(),
5147 sizeof(fabric_event), 5198 sizeof(fabric_event),
5148 (char *)&fabric_event, 5199 (char *)&fabric_event,
5149 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5200 LPFC_NL_VENDOR_ID);
5150 return; 5201 return;
5151 } 5202 }
5152 5203
@@ -5164,32 +5215,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5164static void 5215static void
5165lpfc_send_els_event(struct lpfc_vport *vport, 5216lpfc_send_els_event(struct lpfc_vport *vport,
5166 struct lpfc_nodelist *ndlp, 5217 struct lpfc_nodelist *ndlp,
5167 uint32_t cmd) 5218 uint32_t *payload)
5168{ 5219{
5169 struct lpfc_els_event_header els_data; 5220 struct lpfc_els_event_header *els_data = NULL;
5221 struct lpfc_logo_event *logo_data = NULL;
5170 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5222 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5171 5223
5172 els_data.event_type = FC_REG_ELS_EVENT; 5224 if (*payload == ELS_CMD_LOGO) {
5173 switch (cmd) { 5225 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5226 if (!logo_data) {
5227 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5228 "0148 Failed to allocate memory "
5229 "for LOGO event\n");
5230 return;
5231 }
5232 els_data = &logo_data->header;
5233 } else {
5234 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5235 GFP_KERNEL);
5236 if (!els_data) {
5237 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5238 "0149 Failed to allocate memory "
5239 "for ELS event\n");
5240 return;
5241 }
5242 }
5243 els_data->event_type = FC_REG_ELS_EVENT;
5244 switch (*payload) {
5174 case ELS_CMD_PLOGI: 5245 case ELS_CMD_PLOGI:
5175 els_data.subcategory = LPFC_EVENT_PLOGI_RCV; 5246 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
5176 break; 5247 break;
5177 case ELS_CMD_PRLO: 5248 case ELS_CMD_PRLO:
5178 els_data.subcategory = LPFC_EVENT_PRLO_RCV; 5249 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
5179 break; 5250 break;
5180 case ELS_CMD_ADISC: 5251 case ELS_CMD_ADISC:
5181 els_data.subcategory = LPFC_EVENT_ADISC_RCV; 5252 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5253 break;
5254 case ELS_CMD_LOGO:
5255 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5256 /* Copy the WWPN in the LOGO payload */
5257 memcpy(logo_data->logo_wwpn, &payload[2],
5258 sizeof(struct lpfc_name));
5182 break; 5259 break;
5183 default: 5260 default:
5184 return; 5261 return;
5185 } 5262 }
5186 memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 5263 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5187 memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 5264 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5188 fc_host_post_vendor_event(shost, 5265 if (*payload == ELS_CMD_LOGO) {
5189 fc_get_event_number(), 5266 fc_host_post_vendor_event(shost,
5190 sizeof(els_data), 5267 fc_get_event_number(),
5191 (char *)&els_data, 5268 sizeof(struct lpfc_logo_event),
5192 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5269 (char *)logo_data,
5270 LPFC_NL_VENDOR_ID);
5271 kfree(logo_data);
5272 } else {
5273 fc_host_post_vendor_event(shost,
5274 fc_get_event_number(),
5275 sizeof(struct lpfc_els_event_header),
5276 (char *)els_data,
5277 LPFC_NL_VENDOR_ID);
5278 kfree(els_data);
5279 }
5193 5280
5194 return; 5281 return;
5195} 5282}
@@ -5296,7 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5296 phba->fc_stat.elsRcvPLOGI++; 5383 phba->fc_stat.elsRcvPLOGI++;
5297 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 5384 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5298 5385
5299 lpfc_send_els_event(vport, ndlp, cmd); 5386 lpfc_send_els_event(vport, ndlp, payload);
5300 if (vport->port_state < LPFC_DISC_AUTH) { 5387 if (vport->port_state < LPFC_DISC_AUTH) {
5301 if (!(phba->pport->fc_flag & FC_PT2PT) || 5388 if (!(phba->pport->fc_flag & FC_PT2PT) ||
5302 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 5389 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -5334,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5334 did, vport->port_state, ndlp->nlp_flag); 5421 did, vport->port_state, ndlp->nlp_flag);
5335 5422
5336 phba->fc_stat.elsRcvLOGO++; 5423 phba->fc_stat.elsRcvLOGO++;
5424 lpfc_send_els_event(vport, ndlp, payload);
5337 if (vport->port_state < LPFC_DISC_AUTH) { 5425 if (vport->port_state < LPFC_DISC_AUTH) {
5338 rjt_err = LSRJT_UNABLE_TPC; 5426 rjt_err = LSRJT_UNABLE_TPC;
5339 break; 5427 break;
@@ -5346,7 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5346 did, vport->port_state, ndlp->nlp_flag); 5434 did, vport->port_state, ndlp->nlp_flag);
5347 5435
5348 phba->fc_stat.elsRcvPRLO++; 5436 phba->fc_stat.elsRcvPRLO++;
5349 lpfc_send_els_event(vport, ndlp, cmd); 5437 lpfc_send_els_event(vport, ndlp, payload);
5350 if (vport->port_state < LPFC_DISC_AUTH) { 5438 if (vport->port_state < LPFC_DISC_AUTH) {
5351 rjt_err = LSRJT_UNABLE_TPC; 5439 rjt_err = LSRJT_UNABLE_TPC;
5352 break; 5440 break;
@@ -5364,7 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5364 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 5452 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
5365 did, vport->port_state, ndlp->nlp_flag); 5453 did, vport->port_state, ndlp->nlp_flag);
5366 5454
5367 lpfc_send_els_event(vport, ndlp, cmd); 5455 lpfc_send_els_event(vport, ndlp, payload);
5368 phba->fc_stat.elsRcvADISC++; 5456 phba->fc_stat.elsRcvADISC++;
5369 if (vport->port_state < LPFC_DISC_AUTH) { 5457 if (vport->port_state < LPFC_DISC_AUTH) {
5370 rjt_err = LSRJT_UNABLE_TPC; 5458 rjt_err = LSRJT_UNABLE_TPC;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a1a70d9ffc2a..8c64494444bf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -350,7 +350,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
350 evt_data_size = sizeof(fast_evt_data->un. 350 evt_data_size = sizeof(fast_evt_data->un.
351 read_check_error); 351 read_check_error);
352 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 352 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
353 (evt_sub_category == IOSTAT_NPORT_BSY)) { 353 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
354 evt_data = (char *) &fast_evt_data->un.fabric_evt; 354 evt_data = (char *) &fast_evt_data->un.fabric_evt;
355 evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 355 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
356 } else { 356 } else {
@@ -387,7 +387,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
387 fc_get_event_number(), 387 fc_get_event_number(),
388 evt_data_size, 388 evt_data_size,
389 evt_data, 389 evt_data,
390 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 390 LPFC_NL_VENDOR_ID);
391 391
392 lpfc_free_fast_evt(phba, fast_evt_data); 392 lpfc_free_fast_evt(phba, fast_evt_data);
393 return; 393 return;
@@ -585,20 +585,25 @@ lpfc_do_work(void *p)
585 set_user_nice(current, -20); 585 set_user_nice(current, -20);
586 phba->data_flags = 0; 586 phba->data_flags = 0;
587 587
588 while (1) { 588 while (!kthread_should_stop()) {
589 /* wait and check worker queue activities */ 589 /* wait and check worker queue activities */
590 rc = wait_event_interruptible(phba->work_waitq, 590 rc = wait_event_interruptible(phba->work_waitq,
591 (test_and_clear_bit(LPFC_DATA_READY, 591 (test_and_clear_bit(LPFC_DATA_READY,
592 &phba->data_flags) 592 &phba->data_flags)
593 || kthread_should_stop())); 593 || kthread_should_stop()));
594 BUG_ON(rc); 594 /* Signal wakeup shall terminate the worker thread */
595 595 if (rc) {
596 if (kthread_should_stop()) 596 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
597 "0433 Wakeup on signal: rc=x%x\n", rc);
597 break; 598 break;
599 }
598 600
599 /* Attend pending lpfc data processing */ 601 /* Attend pending lpfc data processing */
600 lpfc_work_done(phba); 602 lpfc_work_done(phba);
601 } 603 }
604 phba->worker_thread = NULL;
605 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
606 "0432 Worker thread stopped.\n");
602 return 0; 607 return 0;
603} 608}
604 609
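
The reworked loop above is the standard kthread pattern: exit when kthread_stop() is called, and treat an interrupting signal as a termination request instead of BUG_ON()-ing. A self-contained sketch of that pattern using the same primitives; struct my_ctx, MY_DATA_READY, and do_pending_work() are illustrative names, not lpfc symbols:

	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/bitops.h>

	#define MY_DATA_READY	0		/* illustrative flag bit */

	struct my_ctx {				/* illustrative context */
		wait_queue_head_t waitq;
		unsigned long flags;
	};

	static void do_pending_work(struct my_ctx *ctx)
	{
		/* drain whatever work was queued; illustrative */
	}

	static int worker_fn(void *data)
	{
		struct my_ctx *ctx = data;
		int rc;

		while (!kthread_should_stop()) {
			rc = wait_event_interruptible(ctx->waitq,
				test_and_clear_bit(MY_DATA_READY, &ctx->flags) ||
				kthread_should_stop());
			if (rc)		/* woken by a signal: terminate */
				break;
			do_pending_work(ctx);
		}
		return 0;
	}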
@@ -1852,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1852 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1857 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1853 NLP_STE_UNUSED_NODE); 1858 NLP_STE_UNUSED_NODE);
1854} 1859}
1860/**
1861 * lpfc_initialize_node: Initialize all fields of node object.
1862 * @vport: Pointer to Virtual Port object.
1863 * @ndlp: Pointer to FC node object.
1864 * @did: FC_ID of the node.
 1865 * This function is always called when a node object needs to
1866 * be initialized. It initializes all the fields of the node
1867 * object.
1868 **/
1869static inline void
1870lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1871 uint32_t did)
1872{
1873 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
1874 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
1875 init_timer(&ndlp->nlp_delayfunc);
1876 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
1877 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1878 ndlp->nlp_DID = did;
1879 ndlp->vport = vport;
1880 ndlp->nlp_sid = NLP_NO_SID;
1881 kref_init(&ndlp->kref);
1882 NLP_INT_NODE_ACT(ndlp);
1883 atomic_set(&ndlp->cmd_pending, 0);
1884 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1885}
1855 1886
1856struct lpfc_nodelist * 1887struct lpfc_nodelist *
1857lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1888lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -1892,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1892 /* re-initialize ndlp except of ndlp linked list pointer */ 1923 /* re-initialize ndlp except of ndlp linked list pointer */
1893 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 1924 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
1894 sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 1925 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
1895 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 1926 lpfc_initialize_node(vport, ndlp, did);
1896 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
1897 init_timer(&ndlp->nlp_delayfunc);
1898 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
1899 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1900 ndlp->nlp_DID = did;
1901 ndlp->vport = vport;
1902 ndlp->nlp_sid = NLP_NO_SID;
1903 /* ndlp management re-initialize */
1904 kref_init(&ndlp->kref);
1905 NLP_INT_NODE_ACT(ndlp);
1906 1927
1907 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 1928 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
1908 1929
@@ -3116,19 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3116 uint32_t did) 3137 uint32_t did)
3117{ 3138{
3118 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 3139 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
3119 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 3140
3120 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 3141 lpfc_initialize_node(vport, ndlp, did);
3121 init_timer(&ndlp->nlp_delayfunc);
3122 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
3123 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
3124 ndlp->nlp_DID = did;
3125 ndlp->vport = vport;
3126 ndlp->nlp_sid = NLP_NO_SID;
3127 INIT_LIST_HEAD(&ndlp->nlp_listp); 3142 INIT_LIST_HEAD(&ndlp->nlp_listp);
3128 kref_init(&ndlp->kref);
3129 NLP_INT_NODE_ACT(ndlp);
3130 atomic_set(&ndlp->cmd_pending, 0);
3131 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
3132 3143
3133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3144 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
3134 "node init: did:x%x", 3145 "node init: did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5de5dabbbee6..4168c7b498b8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -65,6 +65,9 @@
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67 67
68/* vendor ID used in SCSI netlink calls */
69#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
70
68/* Common Transport structures and definitions */ 71/* Common Transport structures and definitions */
69 72
70union CtRevisionId { 73union CtRevisionId {
@@ -866,6 +869,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */
866 } un; 869 } un;
867} D_ID; 870} D_ID;
868 871
872#define RSCN_ADDRESS_FORMAT_PORT 0x0
873#define RSCN_ADDRESS_FORMAT_AREA 0x1
874#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
875#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
876#define RSCN_ADDRESS_FORMAT_MASK 0x3
877
869/* 878/*
870 * Structure to define all ELS Payload types 879 * Structure to define all ELS Payload types
871 */ 880 */
@@ -1535,6 +1544,108 @@ typedef struct ULP_BDL { /* SLI-2 */
1535 uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */ 1544 uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
1536} ULP_BDL; 1545} ULP_BDL;
1537 1546
1547/*
1548 * BlockGuard Definitions
1549 */
1550
1551enum lpfc_protgrp_type {
1552 LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */
1553 LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */
1554 LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */
1555 LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */
1556};
1557
1558/* PDE Descriptors */
1559#define LPFC_PDE1_DESCRIPTOR 0x81
1560#define LPFC_PDE2_DESCRIPTOR 0x82
1561#define LPFC_PDE3_DESCRIPTOR 0x83
1562
1563/* BlockGuard Profiles */
1564enum lpfc_bg_prof_codes {
1565 LPFC_PROF_INVALID,
1566 LPFC_PROF_A1 = 128, /* Full Protection */
1567 LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
1568 LPFC_PROF_A3,
1569 LPFC_PROF_A4,
1570 LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
1571 LPFC_PROF_B2,
1572 LPFC_PROF_B3,
1573 LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
1574 LPFC_PROF_C2,
1575 LPFC_PROF_C3,
1576 LPFC_PROF_D1, /* Full Protection */
1577 LPFC_PROF_D2, /* Partial Protection & Check Disabling */
1578 LPFC_PROF_D3,
1579 LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
1580 LPFC_PROF_E2,
1581 LPFC_PROF_E3,
1582 LPFC_PROF_E4,
1583 LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
1584 /* F1 Translation BDE */
1585 LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
1586 LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
1587 LPFC_PROF_ANT2,
1588 LPFC_PROF_AST2
1589};
1590
1591/* BlockGuard error-control defines */
1592#define BG_EC_STOP_ERR 0x00
1593#define BG_EC_CONT_ERR 0x01
1594#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
1595#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
1596
1597/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
1598#define PDE_DESC_TYPE_MASK 0xff000000
1599#define PDE_DESC_TYPE_SHIFT 24
1600#define PDE_BG_PROFILE_MASK 0x00ff0000
1601#define PDE_BG_PROFILE_SHIFT 16
1602#define PDE_BLOCK_LEN_MASK 0x0000fffc
1603#define PDE_BLOCK_LEN_SHIFT 2
1604#define PDE_ERR_CTRL_MASK 0x00000003
1605#define PDE_ERR_CTRL_SHIFT 0
1606/* PDE word 1 bit masks and shifts */
1607#define PDE_APPTAG_MASK_MASK 0xffff0000
1608#define PDE_APPTAG_MASK_SHIFT 16
1609#define PDE_APPTAG_VAL_MASK 0x0000ffff
1610#define PDE_APPTAG_VAL_SHIFT 0
1611struct lpfc_pde {
1612 uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
 1613	uint32_t apptag;  /* bitfields of app tag mask and app tag value */
1614 uint32_t reftag; /* reference tag occupying all 32 bits */
1615};
1616
1617/* inline function to set fields in parms of PDE */
1618static inline void
1619lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
1620{
1621 uint32_t *wp = &p->parms;
1622
1623 /* spec indicates that adapter appends two 0's to length field */
1624 len = len >> 2;
1625
1626 *wp &= 0;
1627 *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
1628 *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
1629 *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
1630 *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
1631 *wp = le32_to_cpu(*wp);
1632}
1633
1634/* inline function to set apptag and reftag fields of PDE */
1635static inline void
1636lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
1637 u32 reftag)
1638{
1639 uint32_t *wp = &p->apptag;
1640 *wp &= 0;
1641 *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
1642 *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
1643 *wp = le32_to_cpu(*wp);
1644 wp = &p->reftag;
1645 *wp = le32_to_cpu(reftag);
1646}
1647
1648
1538/* Structure for MB Command LOAD_SM and DOWN_LOAD */ 1649/* Structure for MB Command LOAD_SM and DOWN_LOAD */
1539 1650
1540typedef struct { 1651typedef struct {
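
To illustrate how the two helpers above compose a protection descriptor, here is a hypothetical caller; it assumes only the lpfc_hw.h definitions added in this patch, and the profile, block length, and error-control values are arbitrary sample inputs rather than values the driver necessarily programs:

	/* Fill a PDE for 512-byte blocks, profile C1, stop on error. */
	static void example_fill_pde(struct lpfc_pde *pde)
	{
		lpfc_pde_set_bg_parms(pde, LPFC_PDE1_DESCRIPTOR, LPFC_PROF_C1,
				      512, BG_EC_STOP_ERR);
		/* Check every app-tag bit against 0; reference tag seed 0. */
		lpfc_pde_set_dif_parms(pde, 0xffff, 0x0, 0);
	}

Note that lpfc_pde_set_bg_parms() shifts the length right by two before packing it, since the adapter appends two zero bits to the length field.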
@@ -2359,6 +2470,30 @@ typedef struct {
2359#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2360#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2361 2472
2473#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15
2475
2476/* Option rom version structure */
2477struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD
2479 uint8_t type;
2480 uint8_t id;
2481 uint32_t ver:4; /* Major Version */
2482 uint32_t rev:4; /* Revision */
2483 uint32_t lev:2; /* Level */
2484 uint32_t dist:2; /* Dist Type */
2485 uint32_t num:4; /* number after dist type */
2486#else /* __LITTLE_ENDIAN_BITFIELD */
2487 uint32_t num:4; /* number after dist type */
2488 uint32_t dist:2; /* Dist Type */
2489 uint32_t lev:2; /* Level */
2490 uint32_t rev:4; /* Revision */
2491 uint32_t ver:4; /* Major Version */
2492 uint8_t id;
2493 uint8_t type;
2494#endif
2495};
2496
2362/* Structure for MB Command UPDATE_CFG (0x1B) */ 2497/* Structure for MB Command UPDATE_CFG (0x1B) */
2363 2498
2364struct update_cfg_var { 2499struct update_cfg_var {
@@ -2552,11 +2687,19 @@ typedef struct {
2552 2687
2553 uint32_t pcbLow; /* bit 31:0 of memory based port config block */ 2688 uint32_t pcbLow; /* bit 31:0 of memory based port config block */
2554 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ 2689 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
2555 uint32_t hbainit[6]; 2690 uint32_t hbainit[5];
2691#ifdef __BIG_ENDIAN_BITFIELD
2692 uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
2693 uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
2694#else /* __LITTLE_ENDIAN */
2695 uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
2696 uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
2697#endif
2556 2698
2557#ifdef __BIG_ENDIAN_BITFIELD 2699#ifdef __BIG_ENDIAN_BITFIELD
2558 uint32_t rsvd : 24; /* Reserved */ 2700 uint32_t rsvd1 : 23; /* Reserved */
2559 uint32_t cmv : 1; /* Configure Max VPIs */ 2701 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */
2560 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2703 uint32_t ccrp : 1; /* Config Command Ring Polling */
2561 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 2704 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
 2562	uint32_t chbs	: 1;	/* Configure Host Backing store */ 2705	uint32_t chbs	: 1;	/* Configure Host Backing store */
@@ -2573,10 +2716,12 @@ typedef struct {
2573 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 2716 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2574 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2717 uint32_t ccrp : 1; /* Config Command Ring Polling */
2575 uint32_t cmv : 1; /* Configure Max VPIs */ 2718 uint32_t cmv : 1; /* Configure Max VPIs */
2576 uint32_t rsvd : 24; /* Reserved */ 2719 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */
2577#endif 2721#endif
2578#ifdef __BIG_ENDIAN_BITFIELD 2722#ifdef __BIG_ENDIAN_BITFIELD
2579 uint32_t rsvd2 : 24; /* Reserved */ 2723 uint32_t rsvd2 : 23; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */
2580 uint32_t gmv : 1; /* Grant Max VPIs */ 2725 uint32_t gmv : 1; /* Grant Max VPIs */
2581 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2582 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ 2727 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
@@ -2594,7 +2739,8 @@ typedef struct {
2594 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */ 2739 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2595 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2596 uint32_t gmv : 1; /* Grant Max VPIs */ 2741 uint32_t gmv : 1; /* Grant Max VPIs */
2597 uint32_t rsvd2 : 24; /* Reserved */ 2742 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */
2598#endif 2744#endif
2599 2745
2600#ifdef __BIG_ENDIAN_BITFIELD 2746#ifdef __BIG_ENDIAN_BITFIELD
@@ -3214,6 +3360,94 @@ struct que_xri64cx_ext_fields {
3214 struct lpfc_hbq_entry buff[5]; 3360 struct lpfc_hbq_entry buff[5];
3215}; 3361};
3216 3362
3363struct sli3_bg_fields {
3364 uint32_t filler[6]; /* word 8-13 in IOCB */
3365 uint32_t bghm; /* word 14 - BlockGuard High Water Mark */
3366/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
3367#define BGS_BIDIR_BG_PROF_MASK 0xff000000
3368#define BGS_BIDIR_BG_PROF_SHIFT 24
3369#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000
3370#define BGS_BIDIR_ERR_COND_SHIFT 16
3371#define BGS_BG_PROFILE_MASK 0x0000ff00
3372#define BGS_BG_PROFILE_SHIFT 8
3373#define BGS_INVALID_PROF_MASK 0x00000020
3374#define BGS_INVALID_PROF_SHIFT 5
3375#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010
3376#define BGS_UNINIT_DIF_BLOCK_SHIFT 4
3377#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008
3378#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3
3379#define BGS_REFTAG_ERR_MASK 0x00000004
3380#define BGS_REFTAG_ERR_SHIFT 2
3381#define BGS_APPTAG_ERR_MASK 0x00000002
3382#define BGS_APPTAG_ERR_SHIFT 1
3383#define BGS_GUARD_ERR_MASK 0x00000001
3384#define BGS_GUARD_ERR_SHIFT 0
3385 uint32_t bgstat; /* word 15 - BlockGuard Status */
3386};
3387
3388static inline uint32_t
3389lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
3390{
3391 return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
3392 BGS_BIDIR_BG_PROF_SHIFT;
3393}
3394
3395static inline uint32_t
3396lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
3397{
3398 return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
3399 BGS_BIDIR_ERR_COND_SHIFT;
3400}
3401
3402static inline uint32_t
3403lpfc_bgs_get_bg_prof(uint32_t bgstat)
3404{
3405 return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
3406 BGS_BG_PROFILE_SHIFT;
3407}
3408
3409static inline uint32_t
3410lpfc_bgs_get_invalid_prof(uint32_t bgstat)
3411{
3412 return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
3413 BGS_INVALID_PROF_SHIFT;
3414}
3415
3416static inline uint32_t
3417lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
3418{
3419 return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
3420 BGS_UNINIT_DIF_BLOCK_SHIFT;
3421}
3422
3423static inline uint32_t
3424lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
3425{
3426 return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
3427 BGS_HI_WATER_MARK_PRESENT_SHIFT;
3428}
3429
3430static inline uint32_t
3431lpfc_bgs_get_reftag_err(uint32_t bgstat)
3432{
3433 return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
3434 BGS_REFTAG_ERR_SHIFT;
3435}
3436
3437static inline uint32_t
3438lpfc_bgs_get_apptag_err(uint32_t bgstat)
3439{
3440 return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
3441 BGS_APPTAG_ERR_SHIFT;
3442}
3443
3444static inline uint32_t
3445lpfc_bgs_get_guard_err(uint32_t bgstat)
3446{
3447 return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
3448 BGS_GUARD_ERR_SHIFT;
3449}
3450
3217#define LPFC_EXT_DATA_BDE_COUNT 3 3451#define LPFC_EXT_DATA_BDE_COUNT 3
3218struct fcp_irw_ext { 3452struct fcp_irw_ext {
3219 uint32_t io_tag64_low; 3453 uint32_t io_tag64_low;
@@ -3322,6 +3556,9 @@ typedef struct _IOCB { /* IOCB structure */
3322 struct que_xri64cx_ext_fields que_xri64cx_ext_words; 3556 struct que_xri64cx_ext_fields que_xri64cx_ext_words;
3323 struct fcp_irw_ext fcp_ext; 3557 struct fcp_irw_ext fcp_ext;
3324 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ 3558 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3559
3560 /* words 8-15 for BlockGuard */
3561 struct sli3_bg_fields sli3_bg;
3325 } unsli3; 3562 } unsli3;
3326 3563
3327#define ulpCt_h ulpXS 3564#define ulpCt_h ulpXS
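
A sketch of how a completion path might consume the new BlockGuard status word through the accessors above; the IOCB_t typedef comes from the _IOCB structure in this file, while example_check_bgstat() and its printk-based reporting are illustrative only:

	static void example_check_bgstat(IOCB_t *iocb)
	{
		uint32_t bgstat = iocb->unsli3.sli3_bg.bgstat;

		if (lpfc_bgs_get_guard_err(bgstat))
			printk(KERN_ERR "BG: guard tag (checksum) error\n");
		if (lpfc_bgs_get_reftag_err(bgstat))
			printk(KERN_ERR "BG: reference tag error\n");
		if (lpfc_bgs_get_apptag_err(bgstat))
			printk(KERN_ERR "BG: application tag error\n");
		/* word 14 holds the failing LBA when the mark is present */
		if (lpfc_bgs_get_hi_water_mark_present(bgstat))
			printk(KERN_ERR "BG: high water mark x%x\n",
			       iocb->unsli3.sli3_bg.bghm);
	}

Each accessor byte-swaps bgstat with le32_to_cpu() internally, so the raw word from the IOCB is passed in unconverted.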
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 909be3301bba..4c77038c8f1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -45,6 +45,12 @@
45#include "lpfc_vport.h" 45#include "lpfc_vport.h"
46#include "lpfc_version.h" 46#include "lpfc_version.h"
47 47
48char *_dump_buf_data;
49unsigned long _dump_buf_data_order;
50char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock;
53
48static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
49static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
50static int lpfc_post_rcv_buf(struct lpfc_hba *); 56static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -236,6 +242,51 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
236} 242}
237 243
238/** 244/**
245 * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
246 * command used for getting wake up parameters.
247 * @phba: pointer to lpfc hba data structure.
248 * @pmboxq: pointer to the driver internal queue element for mailbox command.
249 *
 250 * This is the completion handler for the dump mailbox command used to get
 251 * wake up parameters. When this command completes, the response contains the
 252 * Option ROM version of the HBA. This function translates the version number
 253 * into a human-readable string and stores it in OptionROMVersion.
254 **/
255static void
256lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
257{
258 struct prog_id *prg;
259 uint32_t prog_id_word;
260 char dist = ' ';
261 /* character array used for decoding dist type. */
262 char dist_char[] = "nabx";
263
264 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
265 mempool_free(pmboxq, phba->mbox_mem_pool);
266 return;
267 }
268
269 prg = (struct prog_id *) &prog_id_word;
270
 271	/* word 7 contains the option rom version */
272 prog_id_word = pmboxq->mb.un.varWords[7];
273
274 /* Decode the Option rom version word to a readable string */
275 if (prg->dist < 4)
276 dist = dist_char[prg->dist];
277
278 if ((prg->dist == 3) && (prg->num == 0))
279 sprintf(phba->OptionROMVersion, "%d.%d%d",
280 prg->ver, prg->rev, prg->lev);
281 else
282 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
283 prg->ver, prg->rev, prg->lev,
284 dist, prg->num);
285 mempool_free(pmboxq, phba->mbox_mem_pool);
286 return;
287}
288
289/**
239 * lpfc_config_port_post: Perform lpfc initialization after config port. 290 * lpfc_config_port_post: Perform lpfc initialization after config port.
240 * @phba: pointer to lpfc hba data structure. 291 * @phba: pointer to lpfc hba data structure.
241 * 292 *
@@ -482,6 +533,20 @@ lpfc_config_port_post(struct lpfc_hba *phba)
482 rc); 533 rc);
483 mempool_free(pmb, phba->mbox_mem_pool); 534 mempool_free(pmb, phba->mbox_mem_pool);
484 } 535 }
536
537 /* Get Option rom version */
538 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
539 lpfc_dump_wakeup_param(phba, pmb);
540 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
541 pmb->vport = phba->pport;
542 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
543
544 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
546 "to get Option ROM version status x%x\n.", rc);
547 mempool_free(pmb, phba->mbox_mem_pool);
548 }
549
485 return 0; 550 return 0;
486} 551}
487 552
@@ -686,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
686 return; 751 return;
687 752
688 spin_lock_irq(&phba->pport->work_port_lock); 753 spin_lock_irq(&phba->pport->work_port_lock);
689 /* If the timer is already canceled do nothing */
690 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
691 spin_unlock_irq(&phba->pport->work_port_lock);
692 return;
693 }
694 754
695 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 755 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
696 jiffies)) { 756 jiffies)) {
@@ -833,8 +893,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
833 fc_host_post_vendor_event(shost, fc_get_event_number(), 893 fc_host_post_vendor_event(shost, fc_get_event_number(),
834 sizeof(board_event), 894 sizeof(board_event),
835 (char *) &board_event, 895 (char *) &board_event,
836 SCSI_NL_VID_TYPE_PCI 896 LPFC_NL_VENDOR_ID);
837 | PCI_VENDOR_ID_EMULEX);
838 897
839 if (phba->work_hs & HS_FFER6) { 898 if (phba->work_hs & HS_FFER6) {
840 /* Re-establishing Link */ 899 /* Re-establishing Link */
@@ -1984,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
1984 shost->max_lun = vport->cfg_max_luns; 2043 shost->max_lun = vport->cfg_max_luns;
1985 shost->this_id = -1; 2044 shost->this_id = -1;
1986 shost->max_cmd_len = 16; 2045 shost->max_cmd_len = 16;
2046
1987 /* 2047 /*
1988 * Set initial can_queue value since 0 is no longer supported and 2048 * Set initial can_queue value since 0 is no longer supported and
1989 * scsi_add_host will fail. This will be adjusted later based on the 2049 * scsi_add_host will fail. This will be adjusted later based on the
@@ -2042,8 +2102,6 @@ destroy_port(struct lpfc_vport *vport)
2042 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2043 struct lpfc_hba *phba = vport->phba; 2103 struct lpfc_hba *phba = vport->phba;
2044 2104
2045 kfree(vport->vname);
2046
2047 lpfc_debugfs_terminate(vport); 2105 lpfc_debugfs_terminate(vport);
2048 fc_remove_host(shost); 2106 fc_remove_host(shost);
2049 scsi_remove_host(shost); 2107 scsi_remove_host(shost);
@@ -2226,8 +2284,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2226 ARRAY_SIZE(phba->msix_entries)); 2284 ARRAY_SIZE(phba->msix_entries));
2227 if (rc) { 2285 if (rc) {
2228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2229 "0420 Enable MSI-X failed (%d), continuing " 2287 "0420 PCI enable MSI-X failed (%d)\n", rc);
2230 "with MSI\n", rc);
2231 goto msi_fail_out; 2288 goto msi_fail_out;
2232 } else 2289 } else
2233 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2290 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
@@ -2244,9 +2301,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2244 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 2301 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2245 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 2302 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
2246 if (rc) { 2303 if (rc) {
2247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2304 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2248 "0421 MSI-X slow-path request_irq failed " 2305 "0421 MSI-X slow-path request_irq failed "
2249 "(%d), continuing with MSI\n", rc); 2306 "(%d)\n", rc);
2250 goto msi_fail_out; 2307 goto msi_fail_out;
2251 } 2308 }
2252 2309
@@ -2255,9 +2312,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2255 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 2312 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
2256 2313
2257 if (rc) { 2314 if (rc) {
2258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2315 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2259 "0429 MSI-X fast-path request_irq failed " 2316 "0429 MSI-X fast-path request_irq failed "
2260 "(%d), continuing with MSI\n", rc); 2317 "(%d)\n", rc);
2261 goto irq_fail_out; 2318 goto irq_fail_out;
2262 } 2319 }
2263 2320
@@ -2278,7 +2335,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2278 goto mbx_fail_out; 2335 goto mbx_fail_out;
2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2336 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2280 if (rc != MBX_SUCCESS) { 2337 if (rc != MBX_SUCCESS) {
2281 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 2338 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2282 "0351 Config MSI mailbox command failed, " 2339 "0351 Config MSI mailbox command failed, "
2283 "mbxCmd x%x, mbxStatus x%x\n", 2340 "mbxCmd x%x, mbxStatus x%x\n",
2284 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 2341 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
@@ -2327,6 +2384,195 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2327} 2384}
2328 2385
2329/** 2386/**
2387 * lpfc_enable_msi: Enable MSI interrupt mode.
2388 * @phba: pointer to lpfc hba data structure.
2389 *
2390 * This routine is invoked to enable the MSI interrupt mode. The kernel
2391 * function pci_enable_msi() is called to enable the MSI vector. The
 2392 * device driver is responsible for calling request_irq() to register the
 2393 * MSI vector with an interrupt handler, which is done in this function.
2394 *
2395 * Return codes
 2396 * 0 - successful
2397 * other values - error
2398 */
2399static int
2400lpfc_enable_msi(struct lpfc_hba *phba)
2401{
2402 int rc;
2403
2404 rc = pci_enable_msi(phba->pcidev);
2405 if (!rc)
2406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2407 "0462 PCI enable MSI mode success.\n");
2408 else {
2409 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2410 "0471 PCI enable MSI mode failed (%d)\n", rc);
2411 return rc;
2412 }
2413
2414 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2415 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2416 if (rc) {
2417 pci_disable_msi(phba->pcidev);
2418 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2419 "0478 MSI request_irq failed (%d)\n", rc);
2420 }
2421 return rc;
2422}
2423
2424/**
2425 * lpfc_disable_msi: Disable MSI interrupt mode.
2426 * @phba: pointer to lpfc hba data structure.
2427 *
2428 * This routine is invoked to disable the MSI interrupt mode. The driver
 2429 * calls free_irq() on the MSI vector on which it did request_irq() before
 2430 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
 2431 * leaves the device with MSI enabled, leaking its vector.
2432 */
2433
2434static void
2435lpfc_disable_msi(struct lpfc_hba *phba)
2436{
2437 free_irq(phba->pcidev->irq, phba);
2438 pci_disable_msi(phba->pcidev);
2439 return;
2440}
2441
2442/**
2443 * lpfc_log_intr_mode: Log the active interrupt mode
2444 * @phba: pointer to lpfc hba data structure.
2445 * @intr_mode: active interrupt mode adopted.
2446 *
 2447 * This routine is invoked to log the interrupt mode currently in use
 2448 * by the device.
2449 */
2450static void
2451lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
2452{
2453 switch (intr_mode) {
2454 case 0:
2455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2456 "0470 Enable INTx interrupt mode.\n");
2457 break;
2458 case 1:
2459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2460 "0481 Enabled MSI interrupt mode.\n");
2461 break;
2462 case 2:
2463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2464 "0480 Enabled MSI-X interrupt mode.\n");
2465 break;
2466 default:
2467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2468 "0482 Illegal interrupt mode.\n");
2469 break;
2470 }
2471 return;
2472}
2473
2474static void
2475lpfc_stop_port(struct lpfc_hba *phba)
2476{
2477 /* Clear all interrupt enable conditions */
2478 writel(0, phba->HCregaddr);
2479 readl(phba->HCregaddr); /* flush */
2480 /* Clear all pending interrupts */
2481 writel(0xffffffff, phba->HAregaddr);
2482 readl(phba->HAregaddr); /* flush */
2483
2484 /* Reset some HBA SLI setup states */
2485 lpfc_stop_phba_timers(phba);
2486 phba->pport->work_port_events = 0;
2487
2488 return;
2489}
2490
2491/**
2492 * lpfc_enable_intr: Enable device interrupt.
2493 * @phba: pointer to lpfc hba data structure.
2494 *
2495 * This routine is invoked to enable device interrupt and associate driver's
 2496 * interrupt handler(s) to interrupt vector(s). Depending on the interrupt
 2497 * mode configured for the driver, the driver will try to fall back from the
2498 * configured interrupt mode to an interrupt mode which is supported by the
2499 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
2500 *
2501 * Return codes
 2502 * 0 - successful
2503 * other values - error
2504 **/
2505static uint32_t
2506lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2507{
2508 uint32_t intr_mode = LPFC_INTR_ERROR;
2509 int retval;
2510
2511 if (cfg_mode == 2) {
2512 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2513 retval = lpfc_sli_config_port(phba, 3);
2514 if (!retval) {
2515 /* Now, try to enable MSI-X interrupt mode */
2516 retval = lpfc_enable_msix(phba);
2517 if (!retval) {
2518 /* Indicate initialization to MSI-X mode */
2519 phba->intr_type = MSIX;
2520 intr_mode = 2;
2521 }
2522 }
2523 }
2524
 2525	/* Fall back to MSI if MSI-X initialization failed */
2526 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2527 retval = lpfc_enable_msi(phba);
2528 if (!retval) {
2529 /* Indicate initialization to MSI mode */
2530 phba->intr_type = MSI;
2531 intr_mode = 1;
2532 }
2533 }
2534
 2535	/* Fall back to INTx if both MSI-X/MSI initialization failed */
2536 if (phba->intr_type == NONE) {
2537 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2538 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2539 if (!retval) {
2540 /* Indicate initialization to INTx mode */
2541 phba->intr_type = INTx;
2542 intr_mode = 0;
2543 }
2544 }
2545 return intr_mode;
2546}
2547
2548/**
2549 * lpfc_disable_intr: Disable device interrupt.
2550 * @phba: pointer to lpfc hba data structure.
2551 *
2552 * This routine is invoked to disable device interrupt and disassociate the
2553 * driver's interrupt handler(s) from interrupt vector(s). Depending on the
2554 * interrupt mode, the driver will release the interrupt vector(s) for the
2555 * message signaled interrupt.
2556 **/
2557static void
2558lpfc_disable_intr(struct lpfc_hba *phba)
2559{
2560 /* Disable the currently initialized interrupt mode */
2561 if (phba->intr_type == MSIX)
2562 lpfc_disable_msix(phba);
2563 else if (phba->intr_type == MSI)
2564 lpfc_disable_msi(phba);
2565 else if (phba->intr_type == INTx)
2566 free_irq(phba->pcidev->irq, phba);
2567
2568 /* Reset interrupt management states */
2569 phba->intr_type = NONE;
2570 phba->sli.slistat.sli_intr = 0;
2571
2572 return;
2573}
2574
2575/**
2330 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. 2576 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
2331 * @pdev: pointer to PCI device 2577 * @pdev: pointer to PCI device
2332 * @pid: pointer to PCI device identifier 2578 * @pid: pointer to PCI device identifier
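
Condensing the probe-time logic shown later in this file, a caller of the new helpers looks roughly like this; it is a sketch of the pattern rather than driver code, with example_setup_interrupts() as an illustrative wrapper:

	static int example_setup_interrupts(struct lpfc_hba *phba)
	{
		uint32_t intr_mode;

		/* walks the MSI-X -> MSI -> INTx fallback internally */
		intr_mode = lpfc_enable_intr(phba, phba->cfg_use_msi);
		if (intr_mode == LPFC_INTR_ERROR)
			return -ENODEV;

		phba->intr_mode = intr_mode;
		lpfc_log_intr_mode(phba, intr_mode);
		return 0;
	}

The matching teardown on every exit path is a single lpfc_disable_intr(phba) call, which is what lets the patch delete the per-mode MSI-X/MSI/INTx cleanup blocks later in this file.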
@@ -2356,6 +2602,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2356 int error = -ENODEV, retval; 2602 int error = -ENODEV, retval;
2357 int i, hbq_count; 2603 int i, hbq_count;
2358 uint16_t iotag; 2604 uint16_t iotag;
2605 uint32_t cfg_mode, intr_mode;
2359 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2606 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2360 struct lpfc_adapter_event_header adapter_event; 2607 struct lpfc_adapter_event_header adapter_event;
2361 2608
@@ -2409,6 +2656,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2409 phba->eratt_poll.data = (unsigned long) phba; 2656 phba->eratt_poll.data = (unsigned long) phba;
2410 2657
2411 pci_set_master(pdev); 2658 pci_set_master(pdev);
2659 pci_save_state(pdev);
2412 pci_try_set_mwi(pdev); 2660 pci_try_set_mwi(pdev);
2413 2661
2414 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 2662 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
@@ -2557,7 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2557 lpfc_debugfs_initialize(vport); 2805 lpfc_debugfs_initialize(vport);
2558 2806
2559 pci_set_drvdata(pdev, shost); 2807 pci_set_drvdata(pdev, shost);
2560 phba->intr_type = NONE;
2561 2808
2562 phba->MBslimaddr = phba->slim_memmap_p; 2809 phba->MBslimaddr = phba->slim_memmap_p;
2563 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2810 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
@@ -2565,63 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2565 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2812 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2566 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2813 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2567 2814
2568 /* Configure and enable interrupt */ 2815 /* Configure sysfs attributes */
2569 if (phba->cfg_use_msi == 2) {
2570 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2571 error = lpfc_sli_config_port(phba, 3);
2572 if (error)
2573 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2574 "0427 Firmware not capable of SLI 3 mode.\n");
2575 else {
2576 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2577 "0426 Firmware capable of SLI 3 mode.\n");
2578 /* Now, try to enable MSI-X interrupt mode */
2579 error = lpfc_enable_msix(phba);
2580 if (!error) {
2581 phba->intr_type = MSIX;
2582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2583 "0430 enable MSI-X mode.\n");
2584 }
2585 }
2586 }
2587
2588 /* Fallback to MSI if MSI-X initialization failed */
2589 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2590 retval = pci_enable_msi(phba->pcidev);
2591 if (!retval) {
2592 phba->intr_type = MSI;
2593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2594 "0473 enable MSI mode.\n");
2595 } else
2596 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2597 "0452 enable IRQ mode.\n");
2598 }
2599
 2600	/* MSI-X is the only case that doesn't need to call request_irq */
2601 if (phba->intr_type != MSIX) {
2602 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2603 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2604 if (retval) {
2605 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
2606 "interrupt handler failed\n");
2607 error = retval;
2608 goto out_disable_msi;
2609 } else if (phba->intr_type != MSI)
2610 phba->intr_type = INTx;
2611 }
2612
2613 if (lpfc_alloc_sysfs_attr(vport)) { 2816 if (lpfc_alloc_sysfs_attr(vport)) {
2614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2615 "1476 Failed to allocate sysfs attr\n"); 2818 "1476 Failed to allocate sysfs attr\n");
2616 error = -ENOMEM; 2819 error = -ENOMEM;
2617 goto out_free_irq; 2820 goto out_destroy_port;
2618 } 2821 }
2619 2822
2620 if (lpfc_sli_hba_setup(phba)) { 2823 cfg_mode = phba->cfg_use_msi;
2621 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2824 while (true) {
2622 "1477 Failed to set up hba\n"); 2825 /* Configure and enable interrupt */
2623 error = -ENODEV; 2826 intr_mode = lpfc_enable_intr(phba, cfg_mode);
2624 goto out_remove_device; 2827 if (intr_mode == LPFC_INTR_ERROR) {
2828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2829 "0426 Failed to enable interrupt.\n");
2830 goto out_free_sysfs_attr;
2831 }
2832 /* HBA SLI setup */
2833 if (lpfc_sli_hba_setup(phba)) {
2834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2835 "1477 Failed to set up hba\n");
2836 error = -ENODEV;
2837 goto out_remove_device;
2838 }
2839
2840 /* Wait 50ms for the interrupts of previous mailbox commands */
2841 msleep(50);
2842 /* Check active interrupts received */
2843 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2844 /* Log the current active interrupt mode */
2845 phba->intr_mode = intr_mode;
2846 lpfc_log_intr_mode(phba, intr_mode);
2847 break;
2848 } else {
2849 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2850 "0451 Configure interrupt mode (%d) "
2851 "failed active interrupt test.\n",
2852 intr_mode);
2853 if (intr_mode == 0) {
2854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2855 "0479 Failed to enable "
2856 "interrupt.\n");
2857 error = -ENODEV;
2858 goto out_remove_device;
2859 }
2860 /* Stop HBA SLI setups */
2861 lpfc_stop_port(phba);
2862 /* Disable the current interrupt mode */
2863 lpfc_disable_intr(phba);
2864 /* Try next level of interrupt mode */
2865 cfg_mode = --intr_mode;
2866 }
2625 } 2867 }
2626 2868
2627 /* 2869 /*
@@ -2629,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2629 * the value of can_queue. 2871 * the value of can_queue.
2630 */ 2872 */
2631 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2873 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2874 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2875
2876 if (lpfc_prot_mask && lpfc_prot_guard) {
2877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2878 "1478 Registering BlockGuard with the "
2879 "SCSI layer\n");
2880
2881 scsi_host_set_prot(shost, lpfc_prot_mask);
2882 scsi_host_set_guard(shost, lpfc_prot_guard);
2883 }
2884 }
2885
2886 if (!_dump_buf_data) {
2887 int pagecnt = 10;
2888 while (pagecnt) {
2889 spin_lock_init(&_dump_buf_lock);
2890 _dump_buf_data =
2891 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2892 if (_dump_buf_data) {
2893 printk(KERN_ERR "BLKGRD allocated %d pages for "
2894 "_dump_buf_data at 0x%p\n",
2895 (1 << pagecnt), _dump_buf_data);
2896 _dump_buf_data_order = pagecnt;
2897 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2898 << pagecnt));
2899 break;
2900 } else {
2901 --pagecnt;
2902 }
2903
2904 }
2905
2906 if (!_dump_buf_data_order)
2907 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2908 "memory for hexdump\n");
2909
2910 } else {
2911 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2912 "\n", _dump_buf_data);
2913 }
2914
2915
2916 if (!_dump_buf_dif) {
2917 int pagecnt = 10;
2918 while (pagecnt) {
2919 _dump_buf_dif =
2920 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2921 if (_dump_buf_dif) {
2922 printk(KERN_ERR "BLKGRD allocated %d pages for "
2923 "_dump_buf_dif at 0x%p\n",
2924 (1 << pagecnt), _dump_buf_dif);
2925 _dump_buf_dif_order = pagecnt;
2926 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2927 << pagecnt));
2928 break;
2929 } else {
2930 --pagecnt;
2931 }
2932
2933 }
2934
2935 if (!_dump_buf_dif_order)
2936 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2937 "memory for hexdump\n");
2938
2939 } else {
2940 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
2941 _dump_buf_dif);
2942 }
2632 2943
2633 lpfc_host_attrib_init(shost); 2944 lpfc_host_attrib_init(shost);
2634 2945
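
The two hexdump buffers above are allocated with a shrinking-order retry: start at order 10 and step the order down (halving the size) until __get_free_pages() succeeds. A generic sketch of that pattern, where alloc_pages_best_effort() is an illustrative name:

	#include <linux/gfp.h>
	#include <linux/string.h>

	static char *alloc_pages_best_effort(unsigned int max_order,
					     unsigned long *order_out)
	{
		unsigned int order;
		char *buf;

		for (order = max_order; order > 0; order--) {
			buf = (char *)__get_free_pages(GFP_KERNEL, order);
			if (buf) {
				memset(buf, 0, PAGE_SIZE << order);
				*order_out = order;
				return buf; /* free with free_pages(buf, order) */
			}
		}
		return NULL;
	}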
@@ -2646,29 +2957,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2646 fc_host_post_vendor_event(shost, fc_get_event_number(), 2957 fc_host_post_vendor_event(shost, fc_get_event_number(),
2647 sizeof(adapter_event), 2958 sizeof(adapter_event),
2648 (char *) &adapter_event, 2959 (char *) &adapter_event,
2649 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2960 LPFC_NL_VENDOR_ID);
2650
2651 scsi_scan_host(shost);
2652 2961
2653 return 0; 2962 return 0;
2654 2963
2655out_remove_device: 2964out_remove_device:
2656 lpfc_free_sysfs_attr(vport);
2657 spin_lock_irq(shost->host_lock); 2965 spin_lock_irq(shost->host_lock);
2658 vport->load_flag |= FC_UNLOADING; 2966 vport->load_flag |= FC_UNLOADING;
2659 spin_unlock_irq(shost->host_lock); 2967 spin_unlock_irq(shost->host_lock);
2660out_free_irq:
2661 lpfc_stop_phba_timers(phba); 2968 lpfc_stop_phba_timers(phba);
2662 phba->pport->work_port_events = 0; 2969 phba->pport->work_port_events = 0;
2663 2970 lpfc_disable_intr(phba);
2664 if (phba->intr_type == MSIX) 2971 lpfc_sli_hba_down(phba);
2665 lpfc_disable_msix(phba); 2972 lpfc_sli_brdrestart(phba);
2666 else 2973out_free_sysfs_attr:
2667 free_irq(phba->pcidev->irq, phba); 2974 lpfc_free_sysfs_attr(vport);
2668 2975out_destroy_port:
2669out_disable_msi:
2670 if (phba->intr_type == MSI)
2671 pci_disable_msi(phba->pcidev);
2672 destroy_port(vport); 2976 destroy_port(vport);
2673out_kthread_stop: 2977out_kthread_stop:
2674 kthread_stop(phba->worker_thread); 2978 kthread_stop(phba->worker_thread);
@@ -2709,7 +3013,7 @@ out:
2709 * @pdev: pointer to PCI device 3013 * @pdev: pointer to PCI device
2710 * 3014 *
2711 * This routine is to be registered to the kernel's PCI subsystem. When an 3015 * This routine is to be registered to the kernel's PCI subsystem. When an
2712 * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup 3016 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
2713 * for the HBA device to be removed from the PCI subsystem properly. 3017 * for the HBA device to be removed from the PCI subsystem properly.
2714 **/ 3018 **/
2715static void __devexit 3019static void __devexit
@@ -2717,18 +3021,27 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2717{ 3021{
2718 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3022 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2719 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3023 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3024 struct lpfc_vport **vports;
2720 struct lpfc_hba *phba = vport->phba; 3025 struct lpfc_hba *phba = vport->phba;
3026 int i;
2721 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 3027 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2722 3028
2723 spin_lock_irq(&phba->hbalock); 3029 spin_lock_irq(&phba->hbalock);
2724 vport->load_flag |= FC_UNLOADING; 3030 vport->load_flag |= FC_UNLOADING;
2725 spin_unlock_irq(&phba->hbalock); 3031 spin_unlock_irq(&phba->hbalock);
2726 3032
2727 kfree(vport->vname);
2728 lpfc_free_sysfs_attr(vport); 3033 lpfc_free_sysfs_attr(vport);
2729 3034
2730 kthread_stop(phba->worker_thread); 3035 kthread_stop(phba->worker_thread);
2731 3036
3037 /* Release all the vports against this physical port */
3038 vports = lpfc_create_vport_work_array(phba);
3039 if (vports != NULL)
3040 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
3041 fc_vport_terminate(vports[i]->fc_vport);
3042 lpfc_destroy_vport_work_array(phba, vports);
3043
3044 /* Remove FC host and then SCSI host with the physical port */
2732 fc_remove_host(shost); 3045 fc_remove_host(shost);
2733 scsi_remove_host(shost); 3046 scsi_remove_host(shost);
2734 lpfc_cleanup(vport); 3047 lpfc_cleanup(vport);
@@ -2748,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2748 3061
2749 lpfc_debugfs_terminate(vport); 3062 lpfc_debugfs_terminate(vport);
2750 3063
2751 if (phba->intr_type == MSIX) 3064 /* Disable interrupt */
2752 lpfc_disable_msix(phba); 3065 lpfc_disable_intr(phba);
2753 else {
2754 free_irq(phba->pcidev->irq, phba);
2755 if (phba->intr_type == MSI)
2756 pci_disable_msi(phba->pcidev);
2757 }
2758 3066
2759 pci_set_drvdata(pdev, NULL); 3067 pci_set_drvdata(pdev, NULL);
2760 scsi_host_put(shost); 3068 scsi_host_put(shost);
@@ -2786,6 +3094,115 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2786} 3094}
2787 3095
2788/** 3096/**
3097 * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
3098 * @pdev: pointer to PCI device
3099 * @msg: power management message
3100 *
3101 * This routine is to be registered to the kernel's PCI subsystem to support
3102 * system Power Management (PM). When PM invokes this method, it quiesces the
3103 * device by stopping the driver's worker thread for the device, turning off
 3104 * the device's interrupt and DMA, and bringing the device offline. Note that
 3105 * the driver implements only the minimum PM requirements of a power-aware
 3106 * driver's suspend/resume support: all possible PM messages (SUSPEND,
 3107 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
 3108 * and the driver fully reinitializes its device in the resume() method. The
 3109 * driver therefore sets the device to the PCI_D3hot state in PCI config space
 3110 * instead of setting it according to the @msg provided by the PM core.
3111 *
3112 * Return code
3113 * 0 - driver suspended the device
3114 * Error otherwise
3115 **/
3116static int
3117lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3118{
3119 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3120 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3121
3122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3123 "0473 PCI device Power Management suspend.\n");
3124
3125 /* Bring down the device */
3126 lpfc_offline_prep(phba);
3127 lpfc_offline(phba);
3128 kthread_stop(phba->worker_thread);
3129
3130 /* Disable interrupt from device */
3131 lpfc_disable_intr(phba);
3132
3133 /* Save device state to PCI config space */
3134 pci_save_state(pdev);
3135 pci_set_power_state(pdev, PCI_D3hot);
3136
3137 return 0;
3138}
3139
3140/**
3141 * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
3142 * @pdev: pointer to PCI device
3143 *
3144 * This routine is to be registered to the kernel's PCI subsystem to support
3145 * system Power Management (PM). When PM invokes this method, it restores
3146 * the device's PCI config space state and fully reinitializes the device
 3147 * and brings it online. Note that the driver implements only the minimum
 3148 * PM requirements of a power-aware driver's suspend/resume support: all
 3149 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 3150 * method are treated as SUSPEND, and the driver fully reinitializes its
 3151 * device in the resume() method. The device is therefore set directly to the
 3152 * PCI_D0 state in PCI config space before its saved state is restored.
3153 *
3154 * Return code
 3155 * 0 - driver resumed the device
3156 * Error otherwise
3157 **/
3158static int
3159lpfc_pci_resume_one(struct pci_dev *pdev)
3160{
3161 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3162 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3163 uint32_t intr_mode;
3164 int error;
3165
3166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3167 "0452 PCI device Power Management resume.\n");
3168
3169 /* Restore device state from PCI config space */
3170 pci_set_power_state(pdev, PCI_D0);
3171 pci_restore_state(pdev);
3172 if (pdev->is_busmaster)
3173 pci_set_master(pdev);
3174
3175 /* Startup the kernel thread for this host adapter. */
3176 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3177 "lpfc_worker_%d", phba->brd_no);
3178 if (IS_ERR(phba->worker_thread)) {
3179 error = PTR_ERR(phba->worker_thread);
3180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3181 "0434 PM resume failed to start worker "
3182 "thread: error=x%x.\n", error);
3183 return error;
3184 }
3185
3186 /* Configure and enable interrupt */
3187 intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3188 if (intr_mode == LPFC_INTR_ERROR) {
3189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3190 "0430 PM resume Failed to enable interrupt\n");
3191 return -EIO;
3192 } else
3193 phba->intr_mode = intr_mode;
3194
3195 /* Restart HBA and bring it online */
3196 lpfc_sli_brdrestart(phba);
3197 lpfc_online(phba);
3198
3199 /* Log the current active interrupt mode */
3200 lpfc_log_intr_mode(phba, phba->intr_mode);
3201
3202 return 0;
3203}
3204
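A minimal sketch of the legacy PCI power-management pattern the two routines above follow (save state then D3hot on suspend; D0, restore, full reinit on resume). my_suspend/my_resume/my_quiesce/my_reinit are hypothetical placeholders for driver-specific teardown and bring-up, not lpfc functions:

#include <linux/pci.h>

/* Hypothetical driver hooks: offline/stop-thread/disable-IRQ, and the
 * matching full bring-up. */
static void my_quiesce(void *priv);
static int my_reinit(void *priv);

static int my_suspend(struct pci_dev *pdev, pm_message_t msg)
{
        my_quiesce(pci_get_drvdata(pdev));    /* quiesce regardless of @msg */
        pci_save_state(pdev);                 /* stash PCI config space */
        pci_set_power_state(pdev, PCI_D3hot); /* always D3hot */
        return 0;
}

static int my_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);    /* full power first */
        pci_restore_state(pdev);              /* then restore config space */
        return my_reinit(pci_get_drvdata(pdev)); /* reinit as after probe */
}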
3205/**
2789 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. 3206 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
2790 * @pdev: pointer to PCI device. 3207 * @pdev: pointer to PCI device.
2791 * @state: the current PCI connection state. 3208 * @state: the current PCI connection state.
@@ -2828,13 +3245,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2828 pring = &psli->ring[psli->fcp_ring]; 3245 pring = &psli->ring[psli->fcp_ring];
2829 lpfc_sli_abort_iocb_ring(phba, pring); 3246 lpfc_sli_abort_iocb_ring(phba, pring);
2830 3247
2831 if (phba->intr_type == MSIX) 3248 /* Disable interrupt */
2832 lpfc_disable_msix(phba); 3249 lpfc_disable_intr(phba);
2833 else {
2834 free_irq(phba->pcidev->irq, phba);
2835 if (phba->intr_type == MSI)
2836 pci_disable_msi(phba->pcidev);
2837 }
2838 3250
2839 /* Request a slot reset. */ 3251 /* Request a slot reset. */
2840 return PCI_ERS_RESULT_NEED_RESET; 3252 return PCI_ERS_RESULT_NEED_RESET;
@@ -2862,7 +3274,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2862 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3274 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2863 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3275 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2864 struct lpfc_sli *psli = &phba->sli; 3276 struct lpfc_sli *psli = &phba->sli;
2865 int error, retval; 3277 uint32_t intr_mode;
2866 3278
2867 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 3279 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
2868 if (pci_enable_device_mem(pdev)) { 3280 if (pci_enable_device_mem(pdev)) {
@@ -2871,61 +3283,31 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
2871 return PCI_ERS_RESULT_DISCONNECT; 3283 return PCI_ERS_RESULT_DISCONNECT;
2872 } 3284 }
2873 3285
2874 pci_set_master(pdev); 3286 pci_restore_state(pdev);
3287 if (pdev->is_busmaster)
3288 pci_set_master(pdev);
2875 3289
2876 spin_lock_irq(&phba->hbalock); 3290 spin_lock_irq(&phba->hbalock);
2877 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3291 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2878 spin_unlock_irq(&phba->hbalock); 3292 spin_unlock_irq(&phba->hbalock);
2879 3293
2880 /* Enable configured interrupt method */ 3294 /* Configure and enable interrupt */
2881 phba->intr_type = NONE; 3295 intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
2882 if (phba->cfg_use_msi == 2) { 3296 if (intr_mode == LPFC_INTR_ERROR) {
2883 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 3297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2884 error = lpfc_sli_config_port(phba, 3); 3298 "0427 Cannot re-enable interrupt after "
2885 if (error) 3299 "slot reset.\n");
2886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3300 return PCI_ERS_RESULT_DISCONNECT;
2887 "0478 Firmware not capable of SLI 3 mode.\n"); 3301 } else
2888 else { 3302 phba->intr_mode = intr_mode;
2889 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2890 "0479 Firmware capable of SLI 3 mode.\n");
2891 /* Now, try to enable MSI-X interrupt mode */
2892 error = lpfc_enable_msix(phba);
2893 if (!error) {
2894 phba->intr_type = MSIX;
2895 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2896 "0480 enable MSI-X mode.\n");
2897 }
2898 }
2899 }
2900
2901 /* Fallback to MSI if MSI-X initialization failed */
2902 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2903 retval = pci_enable_msi(phba->pcidev);
2904 if (!retval) {
2905 phba->intr_type = MSI;
2906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2907 "0481 enable MSI mode.\n");
2908 } else
2909 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2910 "0470 enable IRQ mode.\n");
2911 }
2912
2913 /* MSI-X is the only case the doesn't need to call request_irq */
2914 if (phba->intr_type != MSIX) {
2915 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2916 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2917 if (retval) {
2918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2919 "0471 Enable interrupt handler "
2920 "failed\n");
2921 } else if (phba->intr_type != MSI)
2922 phba->intr_type = INTx;
2923 }
2924 3303
2925 /* Take device offline; this will perform cleanup */ 3304 /* Take device offline; this will perform cleanup */
2926 lpfc_offline(phba); 3305 lpfc_offline(phba);
2927 lpfc_sli_brdrestart(phba); 3306 lpfc_sli_brdrestart(phba);
2928 3307
3308 /* Log the current active interrupt mode */
3309 lpfc_log_intr_mode(phba, phba->intr_mode);
3310
2929 return PCI_ERS_RESULT_RECOVERED; 3311 return PCI_ERS_RESULT_RECOVERED;
2930} 3312}
2931 3313
@@ -3037,6 +3419,8 @@ static struct pci_driver lpfc_driver = {
3037 .id_table = lpfc_id_table, 3419 .id_table = lpfc_id_table,
3038 .probe = lpfc_pci_probe_one, 3420 .probe = lpfc_pci_probe_one,
3039 .remove = __devexit_p(lpfc_pci_remove_one), 3421 .remove = __devexit_p(lpfc_pci_remove_one),
3422 .suspend = lpfc_pci_suspend_one,
3423 .resume = lpfc_pci_resume_one,
3040 .err_handler = &lpfc_err_handler, 3424 .err_handler = &lpfc_err_handler,
3041}; 3425};
3042 3426
@@ -3100,6 +3484,19 @@ lpfc_exit(void)
3100 fc_release_transport(lpfc_transport_template); 3484 fc_release_transport(lpfc_transport_template);
3101 if (lpfc_enable_npiv) 3485 if (lpfc_enable_npiv)
3102 fc_release_transport(lpfc_vport_transport_template); 3486 fc_release_transport(lpfc_vport_transport_template);
3487 if (_dump_buf_data) {
3488 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
3489 "at 0x%p\n",
3490 (1L << _dump_buf_data_order), _dump_buf_data);
3491 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
3492 }
3493
3494 if (_dump_buf_dif) {
3495 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
3496 "at 0x%p\n",
3497 (1L << _dump_buf_dif_order), _dump_buf_dif);
3498 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
3499 }
3103} 3500}
3104 3501
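A sketch of the assumed allocation counterpart (done elsewhere in the driver, not shown in this hunk): the pages freed above would have been obtained with __get_free_pages() at the recorded order, roughly as below. The order value is an assumption for illustration.

/* illustrative only; the real driver may probe several orders */
_dump_buf_data_order = 9; /* assumed order: 2^9 contiguous pages */
_dump_buf_data = (char *)__get_free_pages(GFP_KERNEL, _dump_buf_data_order);
if (!_dump_buf_data)
        printk(KERN_ERR "BLKGRD: allocation of _dump_buf_data failed\n");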
3105module_init(lpfc_init); 3502module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 39fd2b843bec..a85b7c196bbc 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -27,6 +27,7 @@
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x40 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x100 /* Temperature sensor events */
 30#define LOG_BG 0x200 /* BlockGuard events */
30#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x400 /* Miscellaneous events */
31#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x800 /* SLI events */
32#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7465fe746fe9..34eeb086a667 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -77,6 +77,38 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
77} 77}
78 78
79/** 79/**
 80 * lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params.
81 * @phba: pointer to lpfc hba data structure.
82 * @pmb: pointer to the driver internal queue element for mailbox command.
 83 * This function creates a dump memory mailbox command to dump wake up
84 * parameters.
85 */
86void
87lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
88{
89 MAILBOX_t *mb;
90 void *ctx;
91
92 mb = &pmb->mb;
93 /* Save context so that we can restore after memset */
94 ctx = pmb->context2;
95
 96 /* Setup to dump wakeup parameters region */
97 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
98 mb->mbxCommand = MBX_DUMP_MEMORY;
99 mb->mbxOwner = OWN_HOST;
100 mb->un.varDmp.cv = 1;
101 mb->un.varDmp.type = DMP_NV_PARAMS;
102 mb->un.varDmp.entry_index = 0;
103 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
104 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
105 mb->un.varDmp.co = 0;
106 mb->un.varDmp.resp_offset = 0;
107 pmb->context2 = ctx;
108 return;
109}
110
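A hedged usage sketch for the new helper (not part of this hunk): a caller would typically allocate a mailbox element from the driver mempool, let the helper format it, and issue it through the SLI layer. The polled-mode flow below is illustrative and omits the response handling a real caller needs.

LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

if (pmb) {
        lpfc_dump_wakeup_param(phba, pmb); /* format the DUMP_MEMORY command */
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
                mempool_free(pmb, phba->mbox_mem_pool);
        /* on success the caller would parse the response and free pmb too */
}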
111/**
80 * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param. 112 * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
81 * @phba: pointer to lpfc hba data structure. 113 * @phba: pointer to lpfc hba data structure.
82 * @pmb: pointer to the driver internal queue element for mailbox command. 114 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -1061,9 +1093,14 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1061 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 1093 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1062 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 1094 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1063 1095
 1096 /* The Host Group Pointer is always in SLIM */
1097 mb->un.varCfgPort.hps = 1;
1098
1064 /* If HBA supports SLI=3 ask for it */ 1099 /* If HBA supports SLI=3 ask for it */
1065 1100
1066 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1101 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
1102 if (phba->cfg_enable_bg)
1103 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1067 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1104 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1068 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1105 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1069 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1106 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
@@ -1163,16 +1200,11 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1163 sizeof(*phba->host_gp)); 1200 sizeof(*phba->host_gp));
1164 } 1201 }
1165 1202
1166 /* Setup Port Group ring pointer */ 1203 /* Setup Port Group offset */
1167 if (phba->sli3_options & LPFC_SLI3_INB_ENABLED) { 1204 if (phba->sli_rev == 3)
1168 pgp_offset = offsetof(struct lpfc_sli2_slim,
1169 mbx.us.s3_inb_pgp.port);
1170 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
1171 } else if (phba->sli_rev == 3) {
1172 pgp_offset = offsetof(struct lpfc_sli2_slim, 1205 pgp_offset = offsetof(struct lpfc_sli2_slim,
1173 mbx.us.s3_pgp.port); 1206 mbx.us.s3_pgp.port);
1174 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 1207 else
1175 } else
1176 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); 1208 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1177 pdma_addr = phba->slim2p.phys + pgp_offset; 1209 pdma_addr = phba->slim2p.phys + pgp_offset;
1178 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); 1210 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
@@ -1285,10 +1317,12 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1285void 1317void
1286lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1318lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1287{ 1319{
1320 unsigned long iflag;
1321
1288 /* This function expects to be called from interrupt context */ 1322 /* This function expects to be called from interrupt context */
1289 spin_lock(&phba->hbalock); 1323 spin_lock_irqsave(&phba->hbalock, iflag);
1290 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1324 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1291 spin_unlock(&phba->hbalock); 1325 spin_unlock_irqrestore(&phba->hbalock, iflag);
1292 return; 1326 return;
1293} 1327}
1294 1328
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 1accb5a9f4e6..27d1a88a98fe 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -22,18 +22,20 @@
22#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */ 22#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
23#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */ 23#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
24#define FC_REG_CT_EVENT 0x0004 /* CT request events */ 24#define FC_REG_CT_EVENT 0x0004 /* CT request events */
25#define FC_REG_DUMP_EVENT 0x0008 /* Dump events */ 25#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
26#define FC_REG_TEMPERATURE_EVENT 0x0010 /* temperature events */ 26#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
27#define FC_REG_ELS_EVENT 0x0020 /* lpfc els events */ 27#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
28#define FC_REG_FABRIC_EVENT 0x0040 /* lpfc fabric events */ 28#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
29#define FC_REG_SCSI_EVENT 0x0080 /* lpfc scsi events */ 29#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
30#define FC_REG_BOARD_EVENT 0x0100 /* lpfc board events */ 30#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
31#define FC_REG_ADAPTER_EVENT 0x0200 /* lpfc adapter events */ 31#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
32#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
32#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \ 33#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
33 FC_REG_RSCN_EVENT | \ 34 FC_REG_RSCN_EVENT | \
34 FC_REG_CT_EVENT | \ 35 FC_REG_CT_EVENT | \
35 FC_REG_DUMP_EVENT | \ 36 FC_REG_DUMP_EVENT | \
36 FC_REG_TEMPERATURE_EVENT | \ 37 FC_REG_TEMPERATURE_EVENT | \
38 FC_REG_VPORTRSCN_EVENT | \
37 FC_REG_ELS_EVENT | \ 39 FC_REG_ELS_EVENT | \
38 FC_REG_FABRIC_EVENT | \ 40 FC_REG_FABRIC_EVENT | \
39 FC_REG_SCSI_EVENT | \ 41 FC_REG_SCSI_EVENT | \
@@ -52,6 +54,13 @@
52 * The payload sent via the fc transport is one-way driver->application. 54 * The payload sent via the fc transport is one-way driver->application.
53 */ 55 */
54 56
57/* RSCN event header */
58struct lpfc_rscn_event_header {
59 uint32_t event_type;
60 uint32_t payload_length; /* RSCN data length in bytes */
61 uint32_t rscn_payload[];
62};
63
55/* els event header */ 64/* els event header */
56struct lpfc_els_event_header { 65struct lpfc_els_event_header {
57 uint32_t event_type; 66 uint32_t event_type;
@@ -65,6 +74,7 @@ struct lpfc_els_event_header {
65#define LPFC_EVENT_PRLO_RCV 0x02 74#define LPFC_EVENT_PRLO_RCV 0x02
66#define LPFC_EVENT_ADISC_RCV 0x04 75#define LPFC_EVENT_ADISC_RCV 0x04
67#define LPFC_EVENT_LSRJT_RCV 0x08 76#define LPFC_EVENT_LSRJT_RCV 0x08
77#define LPFC_EVENT_LOGO_RCV 0x10
68 78
69/* special els lsrjt event */ 79/* special els lsrjt event */
70struct lpfc_lsrjt_event { 80struct lpfc_lsrjt_event {
@@ -74,6 +84,11 @@ struct lpfc_lsrjt_event {
74 uint32_t explanation; 84 uint32_t explanation;
75}; 85};
76 86
87/* special els logo event */
88struct lpfc_logo_event {
89 struct lpfc_els_event_header header;
90 uint8_t logo_wwpn[8];
91};
77 92
78/* fabric event header */ 93/* fabric event header */
79struct lpfc_fabric_event_header { 94struct lpfc_fabric_event_header {
@@ -125,6 +140,7 @@ struct lpfc_scsi_varqueuedepth_event {
125/* special case scsi check condition event */ 140/* special case scsi check condition event */
126struct lpfc_scsi_check_condition_event { 141struct lpfc_scsi_check_condition_event {
127 struct lpfc_scsi_event_header scsi_event; 142 struct lpfc_scsi_event_header scsi_event;
143 uint8_t opcode;
128 uint8_t sense_key; 144 uint8_t sense_key;
129 uint8_t asc; 145 uint8_t asc;
130 uint8_t ascq; 146 uint8_t ascq;
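These payload structs are delivered to userspace through the FC transport's vendor-event interface; a hedged sketch follows, in which shost, payload_len, rscn_data, and LPFC_NL_VENDOR_ID are assumptions for illustration, not taken from this hunk.

struct lpfc_rscn_event_header *ev;
u32 len = sizeof(*ev) + payload_len; /* payload_len: assumed RSCN byte count */

ev = kzalloc(len, GFP_KERNEL);
if (ev) {
        ev->event_type = FC_REG_RSCN_EVENT;
        ev->payload_length = payload_len;
        memcpy(ev->rscn_payload, rscn_data, payload_len); /* assumed source */
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  len, (char *)ev, LPFC_NL_VENDOR_ID);
        kfree(ev);
}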
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0c25d97acb42..8f548adae9cc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1929,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1929 if (vport->fc_flag & FC_RSCN_DEFERRED) 1929 if (vport->fc_flag & FC_RSCN_DEFERRED)
1930 return ndlp->nlp_state; 1930 return ndlp->nlp_state;
1931 1931
1932 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1932 spin_lock_irq(shost->host_lock); 1933 spin_lock_irq(shost->host_lock);
1933 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1934 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1934 spin_unlock_irq(shost->host_lock); 1935 spin_unlock_irq(shost->host_lock);
1935 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1936 return ndlp->nlp_state; 1936 return ndlp->nlp_state;
1937} 1937}
1938 1938
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd1867411821..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
18 * more details, a copy of which can be found in the file COPYING * 18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21
22#include <linux/pci.h> 21#include <linux/pci.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
24#include <asm/unaligned.h>
25 25
26#include <scsi/scsi.h> 26#include <scsi/scsi.h>
27#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
28#include <scsi/scsi_eh.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
29#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
30#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
@@ -43,6 +44,73 @@
43#define LPFC_RESET_WAIT 2 44#define LPFC_RESET_WAIT 2
44#define LPFC_ABORT_WAIT 2 45#define LPFC_ABORT_WAIT 2
45 46
47int _dump_buf_done;
48
49static char *dif_op_str[] = {
50 "SCSI_PROT_NORMAL",
51 "SCSI_PROT_READ_INSERT",
52 "SCSI_PROT_WRITE_STRIP",
53 "SCSI_PROT_READ_STRIP",
54 "SCSI_PROT_WRITE_INSERT",
55 "SCSI_PROT_READ_PASS",
56 "SCSI_PROT_WRITE_PASS",
57 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT"
59};
60
61static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd)
63{
64 void *src, *dst;
65 struct scatterlist *sgde = scsi_sglist(cmnd);
66
67 if (!_dump_buf_data) {
68 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
69 __func__);
70 return;
71 }
72
73
74 if (!sgde) {
75 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
76 return;
77 }
78
79 dst = (void *) _dump_buf_data;
80 while (sgde) {
81 src = sg_virt(sgde);
82 memcpy(dst, src, sgde->length);
83 dst += sgde->length;
84 sgde = sg_next(sgde);
85 }
86}
87
88static void
89lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
90{
91 void *src, *dst;
92 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
93
94 if (!_dump_buf_dif) {
95 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
96 __func__);
97 return;
98 }
99
100 if (!sgde) {
101 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
102 return;
103 }
104
105 dst = _dump_buf_dif;
106 while (sgde) {
107 src = sg_virt(sgde);
108 memcpy(dst, src, sgde->length);
109 dst += sgde->length;
110 sgde = sg_next(sgde);
111 }
112}
113
46/** 114/**
47 * lpfc_update_stats: Update statistical data for the command completion. 115 * lpfc_update_stats: Update statistical data for the command completion.
48 * @phba: Pointer to HBA object. 116 * @phba: Pointer to HBA object.
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
66 if (cmd->result) 134 if (cmd->result)
67 return; 135 return;
68 136
137 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
138
69 spin_lock_irqsave(shost->host_lock, flags); 139 spin_lock_irqsave(shost->host_lock, flags);
70 if (!vport->stat_data_enabled || 140 if (!vport->stat_data_enabled ||
71 vport->stat_data_blocked || 141 vport->stat_data_blocked ||
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
74 spin_unlock_irqrestore(shost->host_lock, flags); 144 spin_unlock_irqrestore(shost->host_lock, flags);
75 return; 145 return;
76 } 146 }
77 latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
78 147
79 if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 148 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
80 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 149 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
81 phba->bucket_step; 150 phba->bucket_step;
82 if (i >= LPFC_MAX_BUCKET_COUNT) 151 /* check array subscript bounds */
83 i = LPFC_MAX_BUCKET_COUNT; 152 if (i < 0)
153 i = 0;
154 else if (i >= LPFC_MAX_BUCKET_COUNT)
155 i = LPFC_MAX_BUCKET_COUNT - 1;
84 } else { 156 } else {
85 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 157 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
86 if (latency <= (phba->bucket_base + 158 if (latency <= (phba->bucket_base +
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
92 spin_unlock_irqrestore(shost->host_lock, flags); 164 spin_unlock_irqrestore(shost->host_lock, flags);
93} 165}
94 166
95
96/** 167/**
97 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change 168 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
98 * event. 169 * event.
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
148 return; 219 return;
149} 220}
150 221
151/* 222/**
152 * This function is called with no lock held when there is a resource 223 * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
153 * error in driver or in firmware. 224 * @phba: The Hba for which this call is being executed.
154 */ 225 *
 226 * This routine is called when there is a resource error in the driver or
 227 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 228 * event each second, and wakes up the worker thread of @phba to process
 229 * the WORKER_RAMP_DOWN_QUEUE event.
230 *
231 * This routine should be called with no lock held.
232 **/
155void 233void
156lpfc_adjust_queue_depth(struct lpfc_hba *phba) 234lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
157{ 235{
158 unsigned long flags; 236 unsigned long flags;
159 uint32_t evt_posted; 237 uint32_t evt_posted;
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
182 return; 260 return;
183} 261}
184 262
185/* 263/**
186 * This function is called with no lock held when there is a successful 264 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
187 * SCSI command completion. 265 * @phba: The Hba for which this call is being executed.
188 */ 266 *
 267 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @phba vport, at
 268 * most one event every 5 minutes after last_ramp_up_time or
 269 * last_rsrc_error_time, and wakes up the worker thread of @phba
 270 * to process the WORKER_RAMP_UP_QUEUE event.
271 *
272 * This routine should be called with no lock held.
273 **/
189static inline void 274static inline void
190lpfc_rampup_queue_depth(struct lpfc_vport *vport, 275lpfc_rampup_queue_depth(struct lpfc_vport *vport,
191 struct scsi_device *sdev) 276 struct scsi_device *sdev)
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
217 return; 302 return;
218} 303}
219 304
305/**
306 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
307 * @phba: The Hba for which this call is being executed.
308 *
 309 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 310 * worker thread. It reduces the queue depth for all scsi devices on each vport
311 * associated with @phba.
312 **/
220void 313void
221lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 314lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
222{ 315{
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
267 atomic_set(&phba->num_cmd_success, 0); 360 atomic_set(&phba->num_cmd_success, 0);
268} 361}
269 362
363/**
364 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
365 * @phba: The Hba for which this call is being executed.
366 *
 367 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 368 * worker thread. It increases the queue depth for all scsi devices on each vport
369 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
370 * num_cmd_success to zero.
371 **/
270void 372void
271lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) 373lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
272{ 374{
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
336 lpfc_destroy_vport_work_array(phba, vports); 438 lpfc_destroy_vport_work_array(phba, vports);
337} 439}
338 440
339/* 441/**
442 * lpfc_new_scsi_buf: Scsi buffer allocator.
443 * @vport: The virtual port for which this call being executed.
444 *
340 * This routine allocates a scsi buffer, which contains all the necessary 445 * This routine allocates a scsi buffer, which contains all the necessary
341 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 446 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
342 * contains information to build the IOCB. The DMAable region contains 447 * contains information to build the IOCB. The DMAable region contains
343 * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to 448 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
344 * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL 449 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
345 * and the BPL BDE is setup in the IOCB. 450 * and the BPL BDE is setup in the IOCB.
346 */ 451 *
452 * Return codes:
453 * NULL - Error
454 * Pointer to lpfc_scsi_buf data structure - Success
455 **/
347static struct lpfc_scsi_buf * 456static struct lpfc_scsi_buf *
348lpfc_new_scsi_buf(struct lpfc_vport *vport) 457lpfc_new_scsi_buf(struct lpfc_vport *vport)
349{ 458{
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
407 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 516 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
408 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 517 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
409 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 518 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
410 bpl[0].tus.w = le32_to_cpu(bpl->tus.w); 519 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
411 520
412 /* Setup the physical region for the FCP RSP */ 521 /* Setup the physical region for the FCP RSP */
413 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 522 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
414 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 523 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
415 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 524 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
416 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 525 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
417 bpl[1].tus.w = le32_to_cpu(bpl->tus.w); 526 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
418 527
419 /* 528 /*
420 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 529 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
422 */ 531 */
423 iocb = &psb->cur_iocbq.iocb; 532 iocb = &psb->cur_iocbq.iocb;
424 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 533 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
425 if (phba->sli_rev == 3) { 534 if ((phba->sli_rev == 3) &&
535 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
426 /* fill in immediate fcp command BDE */ 536 /* fill in immediate fcp command BDE */
427 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 537 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
428 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 538 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
452 return psb; 562 return psb;
453} 563}
454 564
565/**
566 * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
567 * @phba: The Hba for which this call is being executed.
568 *
 569 * This routine removes a scsi buffer from the head of the @phba
 570 * lpfc_scsi_buf_list list and returns it to the caller.
571 *
572 * Return codes:
573 * NULL - Error
574 * Pointer to lpfc_scsi_buf - Success
575 **/
455static struct lpfc_scsi_buf* 576static struct lpfc_scsi_buf*
456lpfc_get_scsi_buf(struct lpfc_hba * phba) 577lpfc_get_scsi_buf(struct lpfc_hba * phba)
457{ 578{
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
464 if (lpfc_cmd) { 585 if (lpfc_cmd) {
465 lpfc_cmd->seg_cnt = 0; 586 lpfc_cmd->seg_cnt = 0;
466 lpfc_cmd->nonsg_phys = 0; 587 lpfc_cmd->nonsg_phys = 0;
588 lpfc_cmd->prot_seg_cnt = 0;
467 } 589 }
468 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 590 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
469 return lpfc_cmd; 591 return lpfc_cmd;
470} 592}
471 593
594/**
595 * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
596 * @phba: The Hba for which this call is being executed.
597 * @psb: The scsi buffer which is being released.
598 *
 599 * This routine releases the @psb scsi buffer by adding it to the tail of
 600 * the @phba lpfc_scsi_buf_list list.
601 **/
472static void 602static void
473lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 603lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
474{ 604{
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
480 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 610 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
481} 611}
482 612
613/**
614 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
615 * @phba: The Hba for which this call is being executed.
616 * @lpfc_cmd: The scsi buffer which is going to be mapped.
617 *
 618 * This routine does the pci dma mapping for the scatter-gather list of the
 619 * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
 620 * the BDEs. It also initializes all IOCB fields which are dependent on the
 621 * scsi command request buffer.
622 *
623 * Return codes:
624 * 1 - Error
625 * 0 - Success
626 **/
483static int 627static int
484lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 628lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
485{ 629{
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
516 lpfc_cmd->seg_cnt = nseg; 660 lpfc_cmd->seg_cnt = nseg;
517 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 661 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
518 printk(KERN_ERR "%s: Too many sg segments from " 662 printk(KERN_ERR "%s: Too many sg segments from "
519 "dma_map_sg. Config %d, seg_cnt %d", 663 "dma_map_sg. Config %d, seg_cnt %d\n",
520 __func__, phba->cfg_sg_seg_cnt, 664 __func__, phba->cfg_sg_seg_cnt,
521 lpfc_cmd->seg_cnt); 665 lpfc_cmd->seg_cnt);
522 scsi_dma_unmap(scsi_cmnd); 666 scsi_dma_unmap(scsi_cmnd);
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
535 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 679 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
536 physaddr = sg_dma_address(sgel); 680 physaddr = sg_dma_address(sgel);
537 if (phba->sli_rev == 3 && 681 if (phba->sli_rev == 3 &&
682 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
538 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 683 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
539 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 684 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
540 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 685 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
560 * explicitly reinitialized and for SLI-3 the extended bde count is 705 * explicitly reinitialized and for SLI-3 the extended bde count is
561 * explicitly reinitialized since all iocb memory resources are reused. 706 * explicitly reinitialized since all iocb memory resources are reused.
562 */ 707 */
563 if (phba->sli_rev == 3) { 708 if (phba->sli_rev == 3 &&
709 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
564 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 710 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
565 /* 711 /*
566 * The extended IOCB format can only fit 3 BDE or a BPL. 712 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
587 ((num_bde + 2) * sizeof(struct ulp_bde64)); 733 ((num_bde + 2) * sizeof(struct ulp_bde64));
588 } 734 }
589 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 735 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
736
737 /*
738 * Due to difference in data length between DIF/non-DIF paths,
739 * we need to set word 4 of IOCB here
740 */
741 iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
742 return 0;
743}
744
745/*
746 * Given a scsi cmnd, determine the BlockGuard profile to be used
747 * with the cmd
748 */
749static int
750lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
751{
752 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
753 uint8_t ret_prof = LPFC_PROF_INVALID;
754
755 if (guard_type == SHOST_DIX_GUARD_IP) {
756 switch (scsi_get_prot_op(sc)) {
757 case SCSI_PROT_READ_INSERT:
758 case SCSI_PROT_WRITE_STRIP:
759 ret_prof = LPFC_PROF_AST2;
760 break;
761
762 case SCSI_PROT_READ_STRIP:
763 case SCSI_PROT_WRITE_INSERT:
764 ret_prof = LPFC_PROF_A1;
765 break;
766
767 case SCSI_PROT_READ_CONVERT:
768 case SCSI_PROT_WRITE_CONVERT:
769 ret_prof = LPFC_PROF_AST1;
770 break;
771
772 case SCSI_PROT_READ_PASS:
773 case SCSI_PROT_WRITE_PASS:
774 case SCSI_PROT_NORMAL:
775 default:
776 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
777 scsi_get_prot_op(sc), guard_type);
778 break;
779
780 }
781 } else if (guard_type == SHOST_DIX_GUARD_CRC) {
782 switch (scsi_get_prot_op(sc)) {
783 case SCSI_PROT_READ_STRIP:
784 case SCSI_PROT_WRITE_INSERT:
785 ret_prof = LPFC_PROF_A1;
786 break;
787
788 case SCSI_PROT_READ_PASS:
789 case SCSI_PROT_WRITE_PASS:
790 ret_prof = LPFC_PROF_C1;
791 break;
792
793 case SCSI_PROT_READ_CONVERT:
794 case SCSI_PROT_WRITE_CONVERT:
795 case SCSI_PROT_READ_INSERT:
796 case SCSI_PROT_WRITE_STRIP:
797 case SCSI_PROT_NORMAL:
798 default:
799 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
800 scsi_get_prot_op(sc), guard_type);
801 break;
802 }
803 } else {
804 /* unsupported format */
805 BUG();
806 }
807
808 return ret_prof;
809}
810
811struct scsi_dif_tuple {
812 __be16 guard_tag; /* Checksum */
813 __be16 app_tag; /* Opaque storage */
814 __be32 ref_tag; /* Target LBA or indirect LBA */
815};
816
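Each DIF tuple is exactly 8 bytes (2 + 2 + 4), one per logical block. An illustrative compile-time check (not in the patch) documents the assumption behind the later protgroup_len % 8 logic:

/* e.g. placed inside any function, such as module init */
BUILD_BUG_ON(sizeof(struct scsi_dif_tuple) != 8); /* one 8-byte tuple per block */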
817static inline unsigned
818lpfc_cmd_blksize(struct scsi_cmnd *sc)
819{
820 return sc->device->sector_size;
821}
822
823/**
824 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
825 * @sc: in: SCSI command
826 * @apptagmask out: app tag mask
827 * @apptagval out: app tag value
828 * @reftag out: ref tag (reference tag)
829 *
830 * Description:
 831 * Extract DIF parameters from the command if possible. Otherwise,
 832 * use default parameters.
833 *
834 **/
835static inline void
836lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
837 uint16_t *apptagval, uint32_t *reftag)
838{
839 struct scsi_dif_tuple *spt;
840 unsigned char op = scsi_get_prot_op(sc);
841 unsigned int protcnt = scsi_prot_sg_count(sc);
842 static int cnt;
843
844 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
845 op == SCSI_PROT_WRITE_PASS ||
846 op == SCSI_PROT_WRITE_CONVERT)) {
847
848 cnt++;
849 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
850 scsi_prot_sglist(sc)[0].offset;
851 *apptagmask = 0;
852 *apptagval = 0;
853 *reftag = cpu_to_be32(spt->ref_tag);
854
855 } else {
856 /* SBC defines ref tag to be lower 32bits of LBA */
857 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
858 *apptagmask = 0;
859 *apptagval = 0;
860 }
861}
862
863/*
864 * This function sets up buffer list for protection groups of
865 * type LPFC_PG_TYPE_NO_DIF
866 *
867 * This is usually used when the HBA is instructed to generate
868 * DIFs and insert them into data stream (or strip DIF from
869 * incoming data stream)
870 *
871 * The buffer list consists of just one protection group described
872 * below:
873 * +-------------------------+
874 * start of prot group --> | PDE_1 |
875 * +-------------------------+
876 * | Data BDE |
877 * +-------------------------+
878 * |more Data BDE's ... (opt)|
879 * +-------------------------+
880 *
881 * @sc: pointer to scsi command we're working on
882 * @bpl: pointer to buffer list for protection groups
883 * @datacnt: number of segments of data that have been dma mapped
884 *
885 * Note: Data s/g buffers have been dma mapped
886 */
887static int
888lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
889 struct ulp_bde64 *bpl, int datasegcnt)
890{
891 struct scatterlist *sgde = NULL; /* s/g data entry */
892 struct lpfc_pde *pde1 = NULL;
893 dma_addr_t physaddr;
894 int i = 0, num_bde = 0;
895 int datadir = sc->sc_data_direction;
896 int prof = LPFC_PROF_INVALID;
897 unsigned blksize;
898 uint32_t reftag;
899 uint16_t apptagmask, apptagval;
900
901 pde1 = (struct lpfc_pde *) bpl;
902 prof = lpfc_sc_to_sli_prof(sc);
903
904 if (prof == LPFC_PROF_INVALID)
905 goto out;
906
907 /* extract some info from the scsi command for PDE1*/
908 blksize = lpfc_cmd_blksize(sc);
909 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
910
911 /* setup PDE1 with what we have */
912 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
913 BG_EC_STOP_ERR);
914 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
915
916 num_bde++;
917 bpl++;
918
919 /* assumption: caller has already run dma_map_sg on command data */
920 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
921 physaddr = sg_dma_address(sgde);
922 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
923 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
924 bpl->tus.f.bdeSize = sg_dma_len(sgde);
925 if (datadir == DMA_TO_DEVICE)
926 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
927 else
928 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
929 bpl->tus.w = le32_to_cpu(bpl->tus.w);
930 bpl++;
931 num_bde++;
932 }
933
934out:
935 return num_bde;
936}
937
938/*
939 * This function sets up buffer list for protection groups of
940 * type LPFC_PG_TYPE_DIF_BUF
941 *
942 * This is usually used when DIFs are in their own buffers,
 943 * separate from the data. The HBA can then be instructed
 944 * to place the DIFs in the outgoing stream. For read operations,
 945 * the HBA could extract the DIFs and place them in DIF buffers.
946 *
947 * The buffer list for this type consists of one or more of the
948 * protection groups described below:
949 * +-------------------------+
950 * start of first prot group --> | PDE_1 |
951 * +-------------------------+
952 * | PDE_3 (Prot BDE) |
953 * +-------------------------+
954 * | Data BDE |
955 * +-------------------------+
956 * |more Data BDE's ... (opt)|
957 * +-------------------------+
958 * start of new prot group --> | PDE_1 |
959 * +-------------------------+
960 * | ... |
961 * +-------------------------+
962 *
963 * @sc: pointer to scsi command we're working on
964 * @bpl: pointer to buffer list for protection groups
965 * @datacnt: number of segments of data that have been dma mapped
966 * @protcnt: number of segment of protection data that have been dma mapped
967 *
968 * Note: It is assumed that both data and protection s/g buffers have been
969 * mapped for DMA
970 */
971static int
972lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
973 struct ulp_bde64 *bpl, int datacnt, int protcnt)
974{
975 struct scatterlist *sgde = NULL; /* s/g data entry */
976 struct scatterlist *sgpe = NULL; /* s/g prot entry */
977 struct lpfc_pde *pde1 = NULL;
978 struct ulp_bde64 *prot_bde = NULL;
979 dma_addr_t dataphysaddr, protphysaddr;
980 unsigned short curr_data = 0, curr_prot = 0;
981 unsigned int split_offset, protgroup_len;
982 unsigned int protgrp_blks, protgrp_bytes;
983 unsigned int remainder, subtotal;
984 int prof = LPFC_PROF_INVALID;
985 int datadir = sc->sc_data_direction;
986 unsigned char pgdone = 0, alldone = 0;
987 unsigned blksize;
988 uint32_t reftag;
989 uint16_t apptagmask, apptagval;
990 int num_bde = 0;
991
992 sgpe = scsi_prot_sglist(sc);
993 sgde = scsi_sglist(sc);
994
995 if (!sgpe || !sgde) {
996 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
997 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
998 sgpe, sgde);
999 return 0;
1000 }
1001
1002 prof = lpfc_sc_to_sli_prof(sc);
1003 if (prof == LPFC_PROF_INVALID)
1004 goto out;
1005
1006 /* extract some info from the scsi command for PDE1*/
1007 blksize = lpfc_cmd_blksize(sc);
1008 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1009
1010 split_offset = 0;
1011 do {
1012 /* setup the first PDE_1 */
1013 pde1 = (struct lpfc_pde *) bpl;
1014
1015 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1016 BG_EC_STOP_ERR);
1017 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1018
1019 num_bde++;
1020 bpl++;
1021
1022 /* setup the first BDE that points to protection buffer */
1023 prot_bde = (struct ulp_bde64 *) bpl;
1024 protphysaddr = sg_dma_address(sgpe);
1025 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1026 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1027 protgroup_len = sg_dma_len(sgpe);
1028
1029
1030 /* must be integer multiple of the DIF block length */
1031 BUG_ON(protgroup_len % 8);
1032
1033 protgrp_blks = protgroup_len / 8;
1034 protgrp_bytes = protgrp_blks * blksize;
1035
1036 prot_bde->tus.f.bdeSize = protgroup_len;
1037 if (datadir == DMA_TO_DEVICE)
1038 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1039 else
1040 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1041 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1042
1043 curr_prot++;
1044 num_bde++;
1045
1046 /* setup BDE's for data blocks associated with DIF data */
1047 pgdone = 0;
1048 subtotal = 0; /* total bytes processed for current prot grp */
1049 while (!pgdone) {
1050 if (!sgde) {
1051 printk(KERN_ERR "%s Invalid data segment\n",
1052 __func__);
1053 return 0;
1054 }
1055 bpl++;
1056 dataphysaddr = sg_dma_address(sgde) + split_offset;
1057 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1058 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1059
1060 remainder = sg_dma_len(sgde) - split_offset;
1061
1062 if ((subtotal + remainder) <= protgrp_bytes) {
1063 /* we can use this whole buffer */
1064 bpl->tus.f.bdeSize = remainder;
1065 split_offset = 0;
1066
1067 if ((subtotal + remainder) == protgrp_bytes)
1068 pgdone = 1;
1069 } else {
1070 /* must split this buffer with next prot grp */
1071 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1072 split_offset += bpl->tus.f.bdeSize;
1073 }
1074
1075 subtotal += bpl->tus.f.bdeSize;
1076
1077 if (datadir == DMA_TO_DEVICE)
1078 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1079 else
1080 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1081 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1082
1083 num_bde++;
1084 curr_data++;
1085
1086 if (split_offset)
1087 break;
1088
1089 /* Move to the next s/g segment if possible */
1090 sgde = sg_next(sgde);
1091 }
1092
1093 /* are we done ? */
1094 if (curr_prot == protcnt) {
1095 alldone = 1;
1096 } else if (curr_prot < protcnt) {
1097 /* advance to next prot buffer */
1098 sgpe = sg_next(sgpe);
1099 bpl++;
1100
1101 /* update the reference tag */
1102 reftag += protgrp_blks;
1103 } else {
1104 /* if we're here, we have a bug */
1105 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1106 }
1107
1108 } while (!alldone);
1109
1110out:
1111
1112
1113 return num_bde;
1114}
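A worked instance of the protection-group arithmetic in lpfc_bg_setup_bpl_prot() above, with assumed example values: every 8-byte tuple guards one logical block, so a 64-byte protection s/g entry on a 512-byte-sector device describes one group of 4096 data bytes.

unsigned int protgroup_len = 64;  /* sg_dma_len() of the prot entry (assumed) */
unsigned int blksize = 512;       /* lpfc_cmd_blksize(sc) (assumed) */
unsigned int protgrp_blks = protgroup_len / 8;       /* 8 blocks covered */
unsigned int protgrp_bytes = protgrp_blks * blksize; /* 4096 data bytes */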
1115/*
1116 * Given a SCSI command that supports DIF, determine composition of protection
1117 * groups involved in setting up buffer lists
1118 *
1119 * Returns:
 1120 * the LPFC_PG_TYPE_* protection group type for DIF (for both read and write)
 1121 */
1122static int
1123lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1124{
1125 int ret = LPFC_PG_TYPE_INVALID;
1126 unsigned char op = scsi_get_prot_op(sc);
1127
1128 switch (op) {
1129 case SCSI_PROT_READ_STRIP:
1130 case SCSI_PROT_WRITE_INSERT:
1131 ret = LPFC_PG_TYPE_NO_DIF;
1132 break;
1133 case SCSI_PROT_READ_INSERT:
1134 case SCSI_PROT_WRITE_STRIP:
1135 case SCSI_PROT_READ_PASS:
1136 case SCSI_PROT_WRITE_PASS:
1137 case SCSI_PROT_WRITE_CONVERT:
1138 case SCSI_PROT_READ_CONVERT:
1139 ret = LPFC_PG_TYPE_DIF_BUF;
1140 break;
1141 default:
1142 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1143 "9021 Unsupported protection op:%d\n", op);
1144 break;
1145 }
1146
1147 return ret;
1148}
1149
1150/*
1151 * This is the protection/DIF aware version of
1152 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1153 * two functions eventually, but for now, it's here
1154 */
1155static int
1156lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1157 struct lpfc_scsi_buf *lpfc_cmd)
1158{
1159 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1160 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1161 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1162 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1163 uint32_t num_bde = 0;
1164 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1165 int prot_group_type = 0;
1166 int diflen, fcpdl;
1167 unsigned blksize;
1168
1169 /*
1170 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1171 * fcp_rsp regions to the first data bde entry
1172 */
1173 bpl += 2;
1174 if (scsi_sg_count(scsi_cmnd)) {
1175 /*
1176 * The driver stores the segment count returned from pci_map_sg
 1177 * because this is a count of dma-mappings used to map the use_sg
1178 * pages. They are not guaranteed to be the same for those
1179 * architectures that implement an IOMMU.
1180 */
1181 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1182 scsi_sglist(scsi_cmnd),
1183 scsi_sg_count(scsi_cmnd), datadir);
1184 if (unlikely(!datasegcnt))
1185 return 1;
1186
1187 lpfc_cmd->seg_cnt = datasegcnt;
1188 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1189 printk(KERN_ERR "%s: Too many sg segments from "
1190 "dma_map_sg. Config %d, seg_cnt %d\n",
1191 __func__, phba->cfg_sg_seg_cnt,
1192 lpfc_cmd->seg_cnt);
1193 scsi_dma_unmap(scsi_cmnd);
1194 return 1;
1195 }
1196
1197 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1198
1199 switch (prot_group_type) {
1200 case LPFC_PG_TYPE_NO_DIF:
1201 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1202 datasegcnt);
 1203 /* we should have 2 or more entries in buffer list */
1204 if (num_bde < 2)
1205 goto err;
1206 break;
1207 case LPFC_PG_TYPE_DIF_BUF:{
1208 /*
1209 * This type indicates that protection buffers are
1210 * passed to the driver, so that needs to be prepared
1211 * for DMA
1212 */
1213 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1214 scsi_prot_sglist(scsi_cmnd),
1215 scsi_prot_sg_count(scsi_cmnd), datadir);
1216 if (unlikely(!protsegcnt)) {
1217 scsi_dma_unmap(scsi_cmnd);
1218 return 1;
1219 }
1220
1221 lpfc_cmd->prot_seg_cnt = protsegcnt;
1222 if (lpfc_cmd->prot_seg_cnt
1223 > phba->cfg_prot_sg_seg_cnt) {
1224 printk(KERN_ERR "%s: Too many prot sg segments "
1225 "from dma_map_sg. Config %d,"
1226 "prot_seg_cnt %d\n", __func__,
1227 phba->cfg_prot_sg_seg_cnt,
1228 lpfc_cmd->prot_seg_cnt);
1229 dma_unmap_sg(&phba->pcidev->dev,
1230 scsi_prot_sglist(scsi_cmnd),
1231 scsi_prot_sg_count(scsi_cmnd),
1232 datadir);
1233 scsi_dma_unmap(scsi_cmnd);
1234 return 1;
1235 }
1236
1237 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1238 datasegcnt, protsegcnt);
 1239 /* we should have 3 or more entries in buffer list */
1240 if (num_bde < 3)
1241 goto err;
1242 break;
1243 }
1244 case LPFC_PG_TYPE_INVALID:
1245 default:
1246 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1247 "9022 Unexpected protection group %i\n",
1248 prot_group_type);
1249 return 1;
1250 }
1251 }
1252
1253 /*
1254 * Finish initializing those IOCB fields that are dependent on the
1255 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1256 * reinitialized since all iocb memory resources are used many times
1257 * for transmit, receive, and continuation bpl's.
1258 */
1259 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1260 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1261 iocb_cmd->ulpBdeCount = 1;
1262 iocb_cmd->ulpLe = 1;
1263
1264 fcpdl = scsi_bufflen(scsi_cmnd);
1265
1266 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1267 /*
1268 * We are in DIF Type 1 mode
 1269 * Every data block has an 8 byte DIF (trailer)
 1270 * attached to it. Must adjust the FCP data length.
1271 */
1272 blksize = lpfc_cmd_blksize(scsi_cmnd);
1273 diflen = (fcpdl / blksize) * 8;
1274 fcpdl += diflen;
1275 }
1276 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
1277
1278 /*
1279 * Due to difference in data length between DIF/non-DIF paths,
1280 * we need to set word 4 of IOCB here
1281 */
1282 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1283
590 return 0; 1284 return 0;
1285err:
1286 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1287 "9023 Could not setup all needed BDE's"
1288 "prot_group_type=%d, num_bde=%d\n",
1289 prot_group_type, num_bde);
1290 return 1;
1291}
1292
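A worked instance of the DIF Type 1 length adjustment above, with assumed example values: a 32 KB transfer on a 512-byte-sector device carries 64 inline 8-byte trailers, so the wire-level FCP data length grows by 512 bytes.

int fcpdl = 32768;                  /* scsi_bufflen() (assumed) */
unsigned blksize = 512;             /* lpfc_cmd_blksize() (assumed) */
int diflen = (fcpdl / blksize) * 8; /* 64 blocks * 8 bytes = 512 */
fcpdl += diflen;                    /* 33280 bytes on the wire */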
1293/*
1294 * This function checks for BlockGuard errors detected by
1295 * the HBA. In case of errors, the ASC/ASCQ fields in the
1296 * sense buffer will be set accordingly, paired with
1297 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1298 * detected corruption.
1299 *
1300 * Returns:
1301 * 0 - No error found
1302 * 1 - BlockGuard error found
1303 * -1 - Internal error (bad profile, ...etc)
1304 */
1305static int
1306lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1307 struct lpfc_iocbq *pIocbOut)
1308{
1309 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1310 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1311 int ret = 0;
1312 uint32_t bghm = bgf->bghm;
1313 uint32_t bgstat = bgf->bgstat;
1314 uint64_t failing_sector = 0;
1315
1316 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
1317 "bgstat=0x%x bghm=0x%x\n",
1318 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1319 cmd->request->nr_sectors, bgstat, bghm);
1320
1321 spin_lock(&_dump_buf_lock);
1322 if (!_dump_buf_done) {
1323 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1324 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1325 lpfc_debug_save_data(cmd);
1326
1327 /* If we have a prot sgl, save the DIF buffer */
1328 if (lpfc_prot_group_type(phba, cmd) ==
1329 LPFC_PG_TYPE_DIF_BUF) {
1330 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1331 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1332 lpfc_debug_save_dif(cmd);
1333 }
1334
1335 _dump_buf_done = 1;
1336 }
1337 spin_unlock(&_dump_buf_lock);
1338
1339 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1340 cmd->result = ScsiResult(DID_ERROR, 0);
1341 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1342 bgstat);
1343 ret = (-1);
1344 goto out;
1345 }
1346
1347 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1348 cmd->result = ScsiResult(DID_ERROR, 0);
1349 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1350 bgstat);
1351 ret = (-1);
1352 goto out;
1353 }
1354
1355 if (lpfc_bgs_get_guard_err(bgstat)) {
1356 ret = 1;
1357
1358 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1359 0x10, 0x1);
1360 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1361 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1362 phba->bg_guard_err_cnt++;
1363 printk(KERN_ERR "BLKGRD: guard_tag error\n");
1364 }
1365
1366 if (lpfc_bgs_get_reftag_err(bgstat)) {
1367 ret = 1;
1368
1369 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1370 0x10, 0x3);
1371 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1372 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1373
1374 phba->bg_reftag_err_cnt++;
1375 printk(KERN_ERR "BLKGRD: ref_tag error\n");
1376 }
1377
1378 if (lpfc_bgs_get_apptag_err(bgstat)) {
1379 ret = 1;
1380
1381 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1382 0x10, 0x2);
1383 cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
1384 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1385
1386 phba->bg_apptag_err_cnt++;
1387 printk(KERN_ERR "BLKGRD: app_tag error\n");
1388 }
1389
1390 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1391 /*
1392 * setup sense data descriptor 0 per SPC-4 as an information
1393 * field, and put the failing LBA in it
1394 */
1395 cmd->sense_buffer[8] = 0; /* Information */
1396 cmd->sense_buffer[9] = 0xa; /* Add. length */
1397 do_div(bghm, cmd->device->sector_size);
1398
1399 failing_sector = scsi_get_lba(cmd);
1400 failing_sector += bghm;
1401
1402 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1403 }
1404
1405 if (!ret) {
1406 /* No error was reported - problem in FW? */
1407 cmd->result = ScsiResult(DID_ERROR, 0);
1408 printk(KERN_ERR "BLKGRD: no errors reported!\n");
1409 }
1410
1411out:
1412 return ret;
591} 1413}
592 1414
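For reference, the sense data built above pairs ILLEGAL_REQUEST with ASC 0x10 and an ASCQ naming the failing tag check, per SBC. An illustrative decode helper (hypothetical, not part of the patch):

static const char *bg_ascq_str(uint8_t ascq)
{
        switch (ascq) {
        case 0x1: return "LOGICAL BLOCK GUARD CHECK FAILED";
        case 0x2: return "LOGICAL BLOCK APPLICATION TAG CHECK FAILED";
        case 0x3: return "LOGICAL BLOCK REFERENCE TAG CHECK FAILED";
        default:  return "unknown DIF check failure";
        }
}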
593/** 1415/**
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
681 lpfc_worker_wake_up(phba); 1503 lpfc_worker_wake_up(phba);
682 return; 1504 return;
683} 1505}
1506
1507/**
1508 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
1509 * @phba: The Hba for which this call is being executed.
1510 * @psb: The scsi buffer which is going to be un-mapped.
1511 *
 1512 * This routine does DMA un-mapping of the scatter gather list of the scsi
 1513 * command field of @psb.
1514 **/
684static void 1515static void
685lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1516lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
686{ 1517{
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
692 */ 1523 */
693 if (psb->seg_cnt > 0) 1524 if (psb->seg_cnt > 0)
694 scsi_dma_unmap(psb->pCmd); 1525 scsi_dma_unmap(psb->pCmd);
1526 if (psb->prot_seg_cnt > 0)
1527 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
1528 scsi_prot_sg_count(psb->pCmd),
1529 psb->pCmd->sc_data_direction);
695} 1530}
696 1531
1532/**
 1533 * lpfc_handle_fcp_err: FCP response handler.
1534 * @vport: The virtual port for which this call is being executed.
1535 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1536 * @rsp_iocb: The response IOCB which contains FCP error.
1537 *
 1538 * This routine is called to process a response IOCB with status field
 1539 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
 1540 * based upon the SCSI and FCP error.
1541 **/
697static void 1542static void
698lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 1543lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
699 struct lpfc_iocbq *rsp_iocb) 1544 struct lpfc_iocbq *rsp_iocb)
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
735 logit = LOG_FCP; 1580 logit = LOG_FCP;
736 1581
737 lpfc_printf_vlog(vport, KERN_WARNING, logit, 1582 lpfc_printf_vlog(vport, KERN_WARNING, logit,
738 "0730 FCP command x%x failed: x%x SNS x%x x%x " 1583 "9024 FCP command x%x failed: x%x SNS x%x x%x "
739 "Data: x%x x%x x%x x%x x%x\n", 1584 "Data: x%x x%x x%x x%x x%x\n",
740 cmnd->cmnd[0], scsi_status, 1585 cmnd->cmnd[0], scsi_status,
741 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 1586 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
758 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 1603 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
759 1604
760 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1605 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
761 "0716 FCP Read Underrun, expected %d, " 1606 "9025 FCP Read Underrun, expected %d, "
762 "residual %d Data: x%x x%x x%x\n", 1607 "residual %d Data: x%x x%x x%x\n",
763 be32_to_cpu(fcpcmd->fcpDl), 1608 be32_to_cpu(fcpcmd->fcpDl),
764 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 1609 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
774 (scsi_get_resid(cmnd) != fcpi_parm)) { 1619 (scsi_get_resid(cmnd) != fcpi_parm)) {
775 lpfc_printf_vlog(vport, KERN_WARNING, 1620 lpfc_printf_vlog(vport, KERN_WARNING,
776 LOG_FCP | LOG_FCP_ERROR, 1621 LOG_FCP | LOG_FCP_ERROR,
777 "0735 FCP Read Check Error " 1622 "9026 FCP Read Check Error "
778 "and Underrun Data: x%x x%x x%x x%x\n", 1623 "and Underrun Data: x%x x%x x%x x%x\n",
779 be32_to_cpu(fcpcmd->fcpDl), 1624 be32_to_cpu(fcpcmd->fcpDl),
780 scsi_get_resid(cmnd), fcpi_parm, 1625 scsi_get_resid(cmnd), fcpi_parm,
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
793 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 1638 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
794 < cmnd->underflow)) { 1639 < cmnd->underflow)) {
795 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 1640 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
796 "0717 FCP command x%x residual " 1641 "9027 FCP command x%x residual "
797 "underrun converted to error " 1642 "underrun converted to error "
798 "Data: x%x x%x x%x\n", 1643 "Data: x%x x%x x%x\n",
799 cmnd->cmnd[0], scsi_bufflen(cmnd), 1644 cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
802 } 1647 }
803 } else if (resp_info & RESID_OVER) { 1648 } else if (resp_info & RESID_OVER) {
804 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1649 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
805 "0720 FCP command x%x residual overrun error. " 1650 "9028 FCP command x%x residual overrun error. "
806 "Data: x%x x%x \n", cmnd->cmnd[0], 1651 "Data: x%x x%x \n", cmnd->cmnd[0],
807 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 1652 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
808 host_status = DID_ERROR; 1653 host_status = DID_ERROR;
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
814 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 1659 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
815 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 1660 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
816 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 1661 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
817 "0734 FCP Read Check Error Data: " 1662 "9029 FCP Read Check Error Data: "
818 "x%x x%x x%x x%x\n", 1663 "x%x x%x x%x x%x\n",
819 be32_to_cpu(fcpcmd->fcpDl), 1664 be32_to_cpu(fcpcmd->fcpDl),
820 be32_to_cpu(fcprsp->rspResId), 1665 be32_to_cpu(fcprsp->rspResId),
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
828 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); 1673 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
829} 1674}
830 1675
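The RESID_UNDER handling above boils down to: propagate the target's residual to the midlayer, then fail the command only when fewer bytes than cmnd->underflow actually arrived. A simplified sketch of that core check (the driver applies additional screening of the FCP response flags that is omitted here):

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int example_check_underrun(struct scsi_cmnd *cmd, u32 resid)
{
	scsi_set_resid(cmd, resid);

	/* bytes transferred = requested length - residual */
	if (scsi_bufflen(cmd) - scsi_get_resid(cmd) < cmd->underflow)
		return DID_ERROR;	/* underrun converted to error */
	return DID_OK;
}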
1676/**
1677 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
1678 * @phba: The Hba for which this call is being executed.
1679 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1680 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1681 *
1682 * This routine assigns the scsi command result by examining the response
1683 * IOCB status field. It also handles the QUEUE FULL condition by ramping
1684 * down the device queue depth.
1685 **/
831static void 1686static void
832lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 1687lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
833 struct lpfc_iocbq *pIocbOut) 1688 struct lpfc_iocbq *pIocbOut)
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
846 1701
847 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 1702 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
848 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 1703 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
849 atomic_dec(&pnode->cmd_pending); 1704 if (pnode && NLP_CHK_NODE_ACT(pnode))
1705 atomic_dec(&pnode->cmd_pending);
850 1706
851 if (lpfc_cmd->status) { 1707 if (lpfc_cmd->status) {
852 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 1708 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
856 lpfc_cmd->status = IOSTAT_DEFAULT; 1712 lpfc_cmd->status = IOSTAT_DEFAULT;
857 1713
858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1714 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
859 "0729 FCP cmd x%x failed <%d/%d> " 1715 "9030 FCP cmd x%x failed <%d/%d> "
860 "status: x%x result: x%x Data: x%x x%x\n", 1716 "status: x%x result: x%x Data: x%x x%x\n",
861 cmd->cmnd[0], 1717 cmd->cmnd[0],
862 cmd->device ? cmd->device->id : 0xffff, 1718 cmd->device ? cmd->device->id : 0xffff,
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
904 lpfc_cmd->result == IOERR_ABORT_REQUESTED) { 1760 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
905 cmd->result = ScsiResult(DID_REQUEUE, 0); 1761 cmd->result = ScsiResult(DID_REQUEUE, 0);
906 break; 1762 break;
907 } /* else: fall through */ 1763 }
1764
1765 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1766 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1767 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1768 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1769 /*
1770 * This is a response for a BG enabled
1771 * cmd. Parse BG error
1772 */
1773 lpfc_parse_bg_err(phba, lpfc_cmd,
1774 pIocbOut);
1775 break;
1776 } else {
1777 lpfc_printf_vlog(vport, KERN_WARNING,
1778 LOG_BG,
1779 "9031 non-zero BGSTAT "
1780 "on unprotected cmd");
1781 }
1782 }
1783
1784 /* else: fall through */
908 default: 1785 default:
909 cmd->result = ScsiResult(DID_ERROR, 0); 1786 cmd->result = ScsiResult(DID_ERROR, 0);
910 break; 1787 break;
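The new branch treats a DMA-failure completion as a BlockGuard event only when the command actually carried protection data; a non-zero bgstat on an unprotected command is merely logged. A sketch of the dispatch test (lpfc_parse_bg_err() is driver-internal and not reproduced here):

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/* Returns true when bgstat should be parsed as a BlockGuard error. */
static bool example_bgstat_applies(struct scsi_cmnd *cmd, u32 bgstat)
{
	return bgstat != 0 && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL;
}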
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
936 time_after(jiffies, lpfc_cmd->start_time + 1813 time_after(jiffies, lpfc_cmd->start_time +
937 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 1814 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
938 spin_lock_irqsave(sdev->host->host_lock, flags); 1815 spin_lock_irqsave(sdev->host->host_lock, flags);
939 if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && 1816 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
940 (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && 1817 if (pnode->cmd_qdepth >
941 ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10)))) 1818 atomic_read(&pnode->cmd_pending) &&
942 pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); 1819 (atomic_read(&pnode->cmd_pending) >
943 1820 LPFC_MIN_TGT_QDEPTH) &&
944 pnode->last_change_time = jiffies; 1821 ((cmd->cmnd[0] == READ_10) ||
1822 (cmd->cmnd[0] == WRITE_10)))
1823 pnode->cmd_qdepth =
1824 atomic_read(&pnode->cmd_pending);
1825
1826 pnode->last_change_time = jiffies;
1827 }
945 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1828 spin_unlock_irqrestore(sdev->host->host_lock, flags);
946 } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && 1829 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1830 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
947 time_after(jiffies, pnode->last_change_time + 1831 time_after(jiffies, pnode->last_change_time +
948 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 1832 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
949 spin_lock_irqsave(sdev->host->host_lock, flags); 1833 spin_lock_irqsave(sdev->host->host_lock, flags);
950 pnode->cmd_qdepth += pnode->cmd_qdepth * 1834 pnode->cmd_qdepth += pnode->cmd_qdepth *
951 LPFC_TGTQ_RAMPUP_PCENT / 100; 1835 LPFC_TGTQ_RAMPUP_PCENT / 100;
952 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) 1836 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
953 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 1837 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
954 pnode->last_change_time = jiffies; 1838 pnode->last_change_time = jiffies;
955 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1839 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1840 }
956 } 1841 }
957 1842
958 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 1843 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
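The ramp-up arm of this hunk grows the tracked queue depth by a fixed percentage and clamps it at the maximum; the ramp-down arm snaps it to the number of commands currently pending. A worked sketch of the ramp-up arithmetic — the two constant values below are assumptions, since LPFC_TGTQ_RAMPUP_PCENT and LPFC_MAX_TGT_QDEPTH are not defined in this hunk:

#include <linux/types.h>

#define EX_RAMPUP_PCENT	5	/* assumed value */
#define EX_MAX_QDEPTH	100	/* assumed value */

static u32 example_ramp_up(u32 qdepth)
{
	qdepth += qdepth * EX_RAMPUP_PCENT / 100;	/* e.g. 60 -> 63 */
	if (qdepth > EX_MAX_QDEPTH)
		qdepth = EX_MAX_QDEPTH;
	return qdepth;
}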
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1067 } 1952 }
1068} 1953}
1069 1954
1955/**
1956 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
1957 * @vport: The virtual port for which this call is being executed.
1958 * @lpfc_cmd: The scsi command which needs to be sent.
1959 * @pnode: Pointer to lpfc_nodelist.
1960 *
1961 * This routine initializes the fcp_cmnd and iocb data structures from the
1962 * scsi command to be transferred.
1963 **/
1070static void 1964static void
1071lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 1965lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1072 struct lpfc_nodelist *pnode) 1966 struct lpfc_nodelist *pnode)
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1122 } else { 2016 } else {
1123 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 2017 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
1124 iocb_cmd->ulpPU = PARM_READ_CHECK; 2018 iocb_cmd->ulpPU = PARM_READ_CHECK;
1125 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1126 fcp_cmnd->fcpCntl3 = READ_DATA; 2019 fcp_cmnd->fcpCntl3 = READ_DATA;
1127 phba->fc4InputRequests++; 2020 phba->fc4InputRequests++;
1128 } 2021 }
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1133 fcp_cmnd->fcpCntl3 = 0; 2026 fcp_cmnd->fcpCntl3 = 0;
1134 phba->fc4ControlRequests++; 2027 phba->fc4ControlRequests++;
1135 } 2028 }
1136 if (phba->sli_rev == 3) 2029 if (phba->sli_rev == 3 &&
2030 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
1137 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 2031 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
1138 /* 2032 /*
1139 * Finish initializing those IOCB fields that are independent 2033 * Finish initializing those IOCB fields that are independent
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1152 piocbq->vport = vport; 2046 piocbq->vport = vport;
1153} 2047}
1154 2048
2049/**
2050 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
2051 * @vport: The virtual port for which this call is being executed.
2052 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2053 * @lun: Logical unit number.
2054 * @task_mgmt_cmd: SCSI task management command.
2055 *
2056 * This routine creates an FCP information unit corresponding to @task_mgmt_cmd.
2057 *
2058 * Return codes:
2059 * 0 - Error
2060 * 1 - Success
2061 **/
1155static int 2062static int
1156lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2063lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1157 struct lpfc_scsi_buf *lpfc_cmd, 2064 struct lpfc_scsi_buf *lpfc_cmd,
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1178 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2085 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1179 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 2086 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
1180 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 2087 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
1181 if (vport->phba->sli_rev == 3) 2088 if (vport->phba->sli_rev == 3 &&
2089 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
1182 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2090 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
1183 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2091 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
1184 piocb->ulpContext = ndlp->nlp_rpi; 2092 piocb->ulpContext = ndlp->nlp_rpi;
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1201 return 1; 2109 return 1;
1202} 2110}
1203 2111
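Per the comment above, a task-management FCP information unit is mostly zeroes: the LUN is encoded with int_to_scsilun() and the task-management flags land in fcpCntl2. A sketch with an abbreviated stand-in for the driver's struct fcp_cmnd (the field layout here is an assumption):

#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>	/* int_to_scsilun() */

struct example_fcp_cmnd {		/* abbreviated stand-in */
	struct scsi_lun fcp_lun;
	u8 fcpCntl2;			/* task management flags */
};

static void example_prep_tm(struct example_fcp_cmnd *fc, unsigned int lun,
			    u8 task_mgmt_cmd)
{
	memset(fc, 0, sizeof(*fc));
	int_to_scsilun(lun, &fc->fcp_lun);
	fc->fcpCntl2 = task_mgmt_cmd;
}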
2112/**
2113 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
2114 * @phba: The Hba for which this call is being executed.
2115 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2116 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2117 *
2118 * This routine is the IOCB completion routine for the device reset and
2119 * target reset handlers. It releases the scsi buffer associated with lpfc_cmd.
2120 **/
1204static void 2121static void
1205lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 2122lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1206 struct lpfc_iocbq *cmdiocbq, 2123 struct lpfc_iocbq *cmdiocbq,
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1213 return; 2130 return;
1214} 2131}
1215 2132
2133/**
2134 * lpfc_scsi_tgt_reset: Target reset handler.
2135 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2136 * @vport: The virtual port for which this call is being executed.
2137 * @tgt_id: Target ID.
2138 * @lun: Lun number.
2139 * @rdata: Pointer to lpfc_rport_data.
2140 *
2141 * This routine issues a TARGET RESET iocb to reset the target with ID @tgt_id.
2142 *
2143 * Return Code:
2144 * 0x2003 - Error
2145 * 0x2002 - Success.
2146 **/
1216static int 2147static int
1217lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, 2148lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1218 unsigned tgt_id, unsigned int lun, 2149 unsigned tgt_id, unsigned int lun,
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1266 return ret; 2197 return ret;
1267} 2198}
1268 2199
2200/**
2201 * lpfc_info: Info entry point of scsi_host_template data structure.
2202 * @host: The scsi host for which this call is being executed.
2203 *
2204 * This routine provides module information about the hba.
2205 *
2206 * Return code:
2207 * Pointer to char - Success.
2208 **/
1269const char * 2209const char *
1270lpfc_info(struct Scsi_Host *host) 2210lpfc_info(struct Scsi_Host *host)
1271{ 2211{
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
1295 return lpfcinfobuf; 2235 return lpfcinfobuf;
1296} 2236}
1297 2237
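lpfc_info() backs the .info member of the scsi_host_template; the midlayer calls it for the adapter description shown in /proc/scsi and sysfs. A minimal sketch of such an entry point (names hypothetical; the real routine builds the string per host):

#include <scsi/scsi_host.h>

static const char *example_info(struct Scsi_Host *host)
{
	return "Example FC HBA on PCI bus";
}

static struct scsi_host_template example_template = {
	.name = "example",
	.info = example_info,
};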
2238/**
2239 * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
2240 * @phba: The Hba for which this call is being executed.
2241 *
2242 * This routine modifies the fcp_poll_timer field of @phba using cfg_poll_tmo.
2243 * The default value of cfg_poll_tmo is 10 milliseconds.
2244 **/
1298static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 2245static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1299{ 2246{
1300 unsigned long poll_tmo_expires = 2247 unsigned long poll_tmo_expires =
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1305 poll_tmo_expires); 2252 poll_tmo_expires);
1306} 2253}
1307 2254
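Re-arming reduces to mod_timer() with a milliseconds-to-jiffies conversion; per the comment above, cfg_poll_tmo defaults to 10 ms. A sketch:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void example_rearm_poll_timer(struct timer_list *timer,
				     unsigned int poll_tmo_ms)
{
	mod_timer(timer, jiffies + msecs_to_jiffies(poll_tmo_ms));
}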
2255/**
2256 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
2257 * @phba: The Hba for which this call is being executed.
2258 *
2259 * This routine starts the fcp_poll_timer of @phba.
2260 **/
1308void lpfc_poll_start_timer(struct lpfc_hba * phba) 2261void lpfc_poll_start_timer(struct lpfc_hba * phba)
1309{ 2262{
1310 lpfc_poll_rearm_timer(phba); 2263 lpfc_poll_rearm_timer(phba);
1311} 2264}
1312 2265
2266/**
2267 * lpfc_poll_timeout: Restart polling timer.
2268 * @ptr: Pointer to lpfc_hba data structure, passed as an unsigned long.
2269 *
2270 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2271 * and the FCP ring interrupt is disabled.
2272 **/
2273
1313void lpfc_poll_timeout(unsigned long ptr) 2274void lpfc_poll_timeout(unsigned long ptr)
1314{ 2275{
1315 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2276 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
1321 } 2282 }
1322} 2283}
1323 2284
2285/**
2286 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
2287 * structure.
2288 * @cmnd: Pointer to scsi_cmnd data structure.
2289 * @done: Pointer to done routine.
2290 *
2291 * The driver registers this routine with the scsi midlayer; it is called to
2292 * submit @cmnd for processing. It prepares an IOCB from the scsi command and
2293 * hands it to the firmware. The @done callback is invoked on completion.
2294 *
2295 * Return value :
2296 * 0 - Success
2297 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2298 **/
1324static int 2299static int
1325lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 2300lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1326{ 2301{
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1340 goto out_fail_command; 2315 goto out_fail_command;
1341 } 2316 }
1342 2317
2318 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2319 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2320
2321 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2322 "str=%s without registering for BlockGuard - "
2323 "Rejecting command\n",
2324 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2325 dif_op_str[scsi_get_prot_op(cmnd)]);
2326 goto out_fail_command;
2327 }
2328
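The guard above fails protected I/O outright when the adapter never negotiated BlockGuard. A sketch of the filter, with a hypothetical hba_has_bg flag standing in for the LPFC_SLI3_BG_ENABLED test:

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/* Returns true when the command must be rejected up front. */
static bool example_must_reject(struct scsi_cmnd *cmnd, bool hba_has_bg)
{
	return !hba_has_bg && scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL;
}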
1343 /* 2329 /*
1344 * Catch race where our node has transitioned, but the 2330 * Catch race where our node has transitioned, but the
1345 * transport is still transitioning. 2331 * transport is still transitioning.
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1348 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2334 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1349 goto out_fail_command; 2335 goto out_fail_command;
1350 } 2336 }
1351 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 2337 if (vport->cfg_max_scsicmpl_time &&
2338 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
1352 goto out_host_busy; 2339 goto out_host_busy;
1353 2340
1354 lpfc_cmd = lpfc_get_scsi_buf(phba); 2341 lpfc_cmd = lpfc_get_scsi_buf(phba);
1355 if (lpfc_cmd == NULL) { 2342 if (lpfc_cmd == NULL) {
1356 lpfc_adjust_queue_depth(phba); 2343 lpfc_rampdown_queue_depth(phba);
1357 2344
1358 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1359 "0707 driver's buffer pool is empty, " 2346 "0707 driver's buffer pool is empty, "
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1361 goto out_host_busy; 2348 goto out_host_busy;
1362 } 2349 }
1363 2350
1364 lpfc_cmd->start_time = jiffies;
1365 /* 2351 /*
1366 * Store the midlayer's command structure for the completion phase 2352 * Store the midlayer's command structure for the completion phase
1367 * and complete the command initialization. 2353 * and complete the command initialization.
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1373 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 2359 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1374 cmnd->scsi_done = done; 2360 cmnd->scsi_done = done;
1375 2361
1376 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2362 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2363 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2364 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2365 "str=%s\n",
2366 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2367 dif_op_str[scsi_get_prot_op(cmnd)]);
2368 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2369 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2370 "%02x %02x %02x %02x %02x \n",
2371 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2372 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2373 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2374 cmnd->cmnd[9]);
2375 if (cmnd->cmnd[0] == READ_10)
2376 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2377 "9035 BLKGRD: READ @ sector %llu, "
2378 "count %lu\n",
2379 (unsigned long long)scsi_get_lba(cmnd),
2380 cmnd->request->nr_sectors);
2381 else if (cmnd->cmnd[0] == WRITE_10)
2382 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2383 "9036 BLKGRD: WRITE @ sector %llu, "
2384 "count %lu cmd=%p\n",
2385 (unsigned long long)scsi_get_lba(cmnd),
2386 cmnd->request->nr_sectors,
2387 cmnd);
2388
2389 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2390 } else {
2391 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2392 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2393 " str=%s\n",
2394 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2395 dif_op_str[scsi_get_prot_op(cmnd)]);
2396 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2397 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2398 "%02x %02x %02x %02x %02x \n",
2399 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2400 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2401 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2402 cmnd->cmnd[9]);
2403 if (cmnd->cmnd[0] == READ_10)
2404 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2405 "9040 dbg: READ @ sector %llu, "
2406 "count %lu\n",
2407 (unsigned long long)scsi_get_lba(cmnd),
2408 cmnd->request->nr_sectors);
2409 else if (cmnd->cmnd[0] == WRITE_10)
2410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2411 "9041 dbg: WRITE @ sector %llu, "
2412 "count %lu cmd=%p\n",
2413 (unsigned long long)scsi_get_lba(cmnd),
2414 cmnd->request->nr_sectors, cmnd);
2415 else
2416 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2417 "9042 dbg: parser not implemented\n");
2418 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2419 }
2420
1377 if (err) 2421 if (err)
1378 goto out_host_busy_free_buf; 2422 goto out_host_busy_free_buf;
1379 2423
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1382 atomic_inc(&ndlp->cmd_pending); 2426 atomic_inc(&ndlp->cmd_pending);
1383 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 2427 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1384 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 2428 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1385 if (err) 2429 if (err) {
2430 atomic_dec(&ndlp->cmd_pending);
1386 goto out_host_busy_free_buf; 2431 goto out_host_busy_free_buf;
1387 2432 }
1388 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2433 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1389 lpfc_sli_poll_fcp_ring(phba); 2434 lpfc_sli_poll_fcp_ring(phba);
1390 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2435 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1394 return 0; 2439 return 0;
1395 2440
1396 out_host_busy_free_buf: 2441 out_host_busy_free_buf:
1397 atomic_dec(&ndlp->cmd_pending);
1398 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 2442 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1399 lpfc_release_scsi_buf(phba, lpfc_cmd); 2443 lpfc_release_scsi_buf(phba, lpfc_cmd);
1400 out_host_busy: 2444 out_host_busy:
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1405 return 0; 2449 return 0;
1406} 2450}
1407 2451
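The return-value contract documented above is the standard queuecommand one: return 0 when the command is accepted (completion is reported later through done()), or SCSI_MLQUEUE_HOST_BUSY to have the midlayer retry once resources free up. A skeletal sketch with a stubbed resource check:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static bool example_have_resources(void)
{
	return true;	/* stub: e.g. a driver buffer-pool check */
}

static int example_queuecommand(struct scsi_cmnd *cmnd,
				void (*done)(struct scsi_cmnd *))
{
	if (!example_have_resources())
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;
	/* ... map buffers, build the IOCB, hand it to the HBA ... */
	return 0;
}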
2452/**
2453 * lpfc_block_error_handler: Routine to block the error handler.
2454 * @cmnd: Pointer to scsi_cmnd data structure.
2455 *
2456 * This routine blocks execution until the fc_rport state is not FC_PORTSTATE_BLOCKED.
2457 **/
1408static void 2458static void
1409lpfc_block_error_handler(struct scsi_cmnd *cmnd) 2459lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1410{ 2460{
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1421 return; 2471 return;
1422} 2472}
1423 2473
2474/**
2475 * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
2476 * structure.
2477 * @cmnd: Pointer to scsi_cmnd data structure.
2478 *
2479 * This routine aborts the @cmnd pending in the base driver.
2480 *
2481 * Return code:
2482 * 0x2003 - Error
2483 * 0x2002 - Success
2484 **/
1424static int 2485static int
1425lpfc_abort_handler(struct scsi_cmnd *cmnd) 2486lpfc_abort_handler(struct scsi_cmnd *cmnd)
1426{ 2487{
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
1516 return ret; 2577 return ret;
1517} 2578}
1518 2579
2580/**
2581 * lpfc_device_reset_handler: eh_device_reset_handler entry point of Scsi
2582 * Host Template data structure.
2583 * @cmnd: Pointer to scsi_cmnd data structure.
2584 *
2585 * This routine does a device reset by sending a TARGET_RESET task management
2586 * command.
2587 *
2588 * Return code:
2589 * 0x2003 - Error
2590 * 0x2002 - Success
2591 **/
1519static int 2592static int
1520lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 2593lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1521{ 2594{
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1560 fc_get_event_number(), 2633 fc_get_event_number(),
1561 sizeof(scsi_event), 2634 sizeof(scsi_event),
1562 (char *)&scsi_event, 2635 (char *)&scsi_event,
1563 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2636 LPFC_NL_VENDOR_ID);
1564 2637
1565 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { 2638 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2639 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1633 return ret; 2706 return ret;
1634} 2707}
1635 2708
2709/**
2710 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
2711 * Template data structure.
2712 * @cmnd: Pointer to scsi_cmnd data structure.
2713 *
2714 * This routine does a target reset for every target on @cmnd->device->host.
2715 *
2716 * Return Code:
2717 * 0x2003 - Error
2718 * 0x2002 - Success
2719 **/
1636static int 2720static int
1637lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1638{ 2722{
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1657 fc_get_event_number(), 2741 fc_get_event_number(),
1658 sizeof(scsi_event), 2742 sizeof(scsi_event),
1659 (char *)&scsi_event, 2743 (char *)&scsi_event,
1660 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 2744 LPFC_NL_VENDOR_ID);
1661 2745
1662 lpfc_block_error_handler(cmnd); 2746 lpfc_block_error_handler(cmnd);
1663 /* 2747 /*
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1723 return ret; 2807 return ret;
1724} 2808}
1725 2809
2810/**
2811 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
2812 * structure.
2813 * @sdev: Pointer to scsi_device.
2814 *
2815 * This routine adds cmds_per_lun + 2 scsi_bufs to this host's globally
2816 * available list of scsi buffers. It also makes sure no more scsi buffers
2817 * are allocated than the HBA limit conveyed to the midlayer. This list of
2818 * scsi buffers exists for the lifetime of the driver.
2819 *
2820 * Return codes:
2821 * non-0 - Error
2822 * 0 - Success
2823 **/
1726static int 2824static int
1727lpfc_slave_alloc(struct scsi_device *sdev) 2825lpfc_slave_alloc(struct scsi_device *sdev)
1728{ 2826{
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1784 return 0; 2882 return 0;
1785} 2883}
1786 2884
2885/**
2886 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template
2887 * data structure.
2888 * @sdev: Pointer to scsi_device.
2889 *
2890 * This routine configures the following items:
2891 * - Tag command queuing support for @sdev, if supported.
2892 * - The dev_loss timeout value of the fc_rport.
2893 * - SLI polling for the fcp ring, if the ENABLE_FCP_RING_POLLING flag is set.
2894 *
2895 * Return codes:
2896 * 0 - Success
2897 **/
1787static int 2898static int
1788lpfc_slave_configure(struct scsi_device *sdev) 2899lpfc_slave_configure(struct scsi_device *sdev)
1789{ 2900{
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
1813 return 0; 2924 return 0;
1814} 2925}
1815 2926
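A slave_configure implementation of this era typically toggles tagged queuing per device with the midlayer helper scsi_adjust_queue_depth(); the depth of 30 below is an arbitrary example, not the driver's configured value:

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

static int example_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 30);
	else
		scsi_adjust_queue_depth(sdev, 0, 30);
	return 0;
}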
2927/**
2928 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
2929 * @sdev: Pointer to scsi_device.
2930 *
2931 * This routine sets the @sdev hostdata field to null.
2932 **/
1816static void 2933static void
1817lpfc_slave_destroy(struct scsi_device *sdev) 2934lpfc_slave_destroy(struct scsi_device *sdev)
1818{ 2935{
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 437f182e2322..c7c440d5fa29 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -124,6 +124,8 @@ struct lpfc_scsi_buf {
124 uint32_t seg_cnt; /* Number of scatter-gather segments returned by 124 uint32_t seg_cnt; /* Number of scatter-gather segments returned by
125 * dma_map_sg. The driver needs this for calls 125 * dma_map_sg. The driver needs this for calls
126 * to dma_unmap_sg. */ 126 * to dma_unmap_sg. */
127 uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
128
127 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ 129 dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
128 130
129 /* 131 /*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8ab5babdeebc..01dfdc8696f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -542,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
542 */ 542 */
543 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 543 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
544 544
545
545 if (pring->ringno == LPFC_ELS_RING) { 546 if (pring->ringno == LPFC_ELS_RING) {
546 lpfc_debugfs_slow_ring_trc(phba, 547 lpfc_debugfs_slow_ring_trc(phba,
547 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 548 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
@@ -1259,68 +1260,6 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1259} 1260}
1260 1261
1261/** 1262/**
1262 * lpfc_sli_replace_hbqbuff: Replace the HBQ buffer with a new buffer.
1263 * @phba: Pointer to HBA context object.
1264 * @tag: Tag for the HBQ buffer.
1265 *
1266 * This function is called from unsolicited event handler code path to get the
1267 * HBQ buffer associated with an unsolicited iocb. This function is called with
1268 * no lock held. It returns the buffer associated with the given tag and posts
1269 * another buffer to the firmware. Note that the new buffer must be allocated
1270 * before taking the hbalock and that the hba lock must be held until it is
1271 * finished with the hbq entry swap.
1272 **/
1273static struct lpfc_dmabuf *
1274lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
1275{
1276 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
1277 uint32_t hbqno;
1278 void *virt; /* virtual address ptr */
1279 dma_addr_t phys; /* mapped address */
1280 unsigned long flags;
1281
1282 hbqno = tag >> 16;
1283 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1284 /* Check whether HBQ is still in use */
1285 spin_lock_irqsave(&phba->hbalock, flags);
1286 if (!phba->hbq_in_use) {
1287 if (new_hbq_entry)
1288 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1289 new_hbq_entry);
1290 spin_unlock_irqrestore(&phba->hbalock, flags);
1291 return NULL;
1292 }
1293
1294 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1295 if (hbq_entry == NULL) {
1296 if (new_hbq_entry)
1297 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1298 new_hbq_entry);
1299 spin_unlock_irqrestore(&phba->hbalock, flags);
1300 return NULL;
1301 }
1302 list_del(&hbq_entry->dbuf.list);
1303
1304 if (new_hbq_entry == NULL) {
1305 list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
1306 spin_unlock_irqrestore(&phba->hbalock, flags);
1307 return &hbq_entry->dbuf;
1308 }
1309 new_hbq_entry->tag = -1;
1310 phys = new_hbq_entry->dbuf.phys;
1311 virt = new_hbq_entry->dbuf.virt;
1312 new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
1313 new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
1314 hbq_entry->dbuf.phys = phys;
1315 hbq_entry->dbuf.virt = virt;
1316 lpfc_sli_free_hbq(phba, hbq_entry);
1317 list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
1318 spin_unlock_irqrestore(&phba->hbalock, flags);
1319
1320 return &new_hbq_entry->dbuf;
1321}
1322
1323/**
1324 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag. 1263 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
1325 * @phba: Pointer to HBA context object. 1264 * @phba: Pointer to HBA context object.
1326 * @pring: Pointer to driver SLI ring object. 1265 * @pring: Pointer to driver SLI ring object.
@@ -1334,13 +1273,17 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
1334 **/ 1273 **/
1335static struct lpfc_dmabuf * 1274static struct lpfc_dmabuf *
1336lpfc_sli_get_buff(struct lpfc_hba *phba, 1275lpfc_sli_get_buff(struct lpfc_hba *phba,
1337 struct lpfc_sli_ring *pring, 1276 struct lpfc_sli_ring *pring,
1338 uint32_t tag) 1277 uint32_t tag)
1339{ 1278{
1279 struct hbq_dmabuf *hbq_entry;
1280
1340 if (tag & QUE_BUFTAG_BIT) 1281 if (tag & QUE_BUFTAG_BIT)
1341 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 1282 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
1342 else 1283 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1343 return lpfc_sli_replace_hbqbuff(phba, tag); 1284 if (!hbq_entry)
1285 return NULL;
1286 return &hbq_entry->dbuf;
1344} 1287}
1345 1288
1346 1289
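Per the removed lpfc_sli_replace_hbqbuff() comment, an HBQ buffer tag encodes the queue number in its upper 16 bits, while QUE_BUFTAG_BIT marks ring-tagged buffers. A sketch of the decode — the bit position below is an assumption, since QUE_BUFTAG_BIT's value is not shown in this patch:

#include <linux/types.h>

#define EX_QUE_BUFTAG_BIT	(1U << 31)	/* assumed bit position */

static inline u32 example_hbqno_from_tag(u32 tag)
{
	return tag >> 16;	/* matches "hbqno = tag >> 16" above */
}

static inline bool example_is_ring_tag(u32 tag)
{
	return (tag & EX_QUE_BUFTAG_BIT) != 0;
}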
@@ -1372,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1372 match = 0; 1315 match = 0;
1373 irsp = &(saveq->iocb); 1316 irsp = &(saveq->iocb);
1374 1317
1375 if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
1376 return 1;
1377 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 1318 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
1378 if (pring->lpfc_sli_rcv_async_status) 1319 if (pring->lpfc_sli_rcv_async_status)
1379 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 1320 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@@ -1982,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1982 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1923 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1983 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1924 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1984 spin_unlock_irqrestore(&phba->hbalock, iflag); 1925 spin_unlock_irqrestore(&phba->hbalock, iflag);
1985 lpfc_adjust_queue_depth(phba); 1926 lpfc_rampdown_queue_depth(phba);
1986 spin_lock_irqsave(&phba->hbalock, iflag); 1927 spin_lock_irqsave(&phba->hbalock, iflag);
1987 } 1928 }
1988 1929
@@ -2225,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2225 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2166 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2226 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2167 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2227 spin_unlock_irqrestore(&phba->hbalock, iflag); 2168 spin_unlock_irqrestore(&phba->hbalock, iflag);
2228 lpfc_adjust_queue_depth(phba); 2169 lpfc_rampdown_queue_depth(phba);
2229 spin_lock_irqsave(&phba->hbalock, iflag); 2170 spin_lock_irqsave(&phba->hbalock, iflag);
2230 } 2171 }
2231 2172
@@ -2790,7 +2731,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2790{ 2731{
2791 MAILBOX_t *mb; 2732 MAILBOX_t *mb;
2792 struct lpfc_sli *psli; 2733 struct lpfc_sli *psli;
2793 uint16_t skip_post;
2794 volatile uint32_t word0; 2734 volatile uint32_t word0;
2795 void __iomem *to_slim; 2735 void __iomem *to_slim;
2796 2736
@@ -2815,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2815 readl(to_slim); /* flush */ 2755 readl(to_slim); /* flush */
2816 2756
2817 /* Only skip post after fc_ffinit is completed */ 2757 /* Only skip post after fc_ffinit is completed */
2818 if (phba->pport->port_state) { 2758 if (phba->pport->port_state)
2819 skip_post = 1;
2820 word0 = 1; /* This is really setting up word1 */ 2759 word0 = 1; /* This is really setting up word1 */
2821 } else { 2760 else
2822 skip_post = 0;
2823 word0 = 0; /* This is really setting up word1 */ 2761 word0 = 0; /* This is really setting up word1 */
2824 }
2825 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2762 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2826 writel(*(uint32_t *) mb, to_slim); 2763 writel(*(uint32_t *) mb, to_slim);
2827 readl(to_slim); /* flush */ 2764 readl(to_slim); /* flush */
@@ -2835,10 +2772,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2835 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2772 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2836 psli->stats_start = get_seconds(); 2773 psli->stats_start = get_seconds();
2837 2774
2838 if (skip_post) 2775 /* Give the INITFF and Post time to settle. */
2839 mdelay(100); 2776 mdelay(100);
2840 else
2841 mdelay(2000);
2842 2777
2843 lpfc_hba_down_post(phba); 2778 lpfc_hba_down_post(phba);
2844 2779
@@ -3084,7 +3019,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3084 spin_unlock_irq(&phba->hbalock); 3019 spin_unlock_irq(&phba->hbalock);
3085 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3020 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3086 lpfc_sli_brdrestart(phba); 3021 lpfc_sli_brdrestart(phba);
3087 msleep(2500);
3088 rc = lpfc_sli_chipset_init(phba); 3022 rc = lpfc_sli_chipset_init(phba);
3089 if (rc) 3023 if (rc)
3090 break; 3024 break;
@@ -3111,7 +3045,8 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3111 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3045 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3112 LPFC_SLI3_HBQ_ENABLED | 3046 LPFC_SLI3_HBQ_ENABLED |
3113 LPFC_SLI3_CRP_ENABLED | 3047 LPFC_SLI3_CRP_ENABLED |
3114 LPFC_SLI3_INB_ENABLED); 3048 LPFC_SLI3_INB_ENABLED |
3049 LPFC_SLI3_BG_ENABLED);
3115 if (rc != MBX_SUCCESS) { 3050 if (rc != MBX_SUCCESS) {
3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3117 "0442 Adapter failed to init, mbxCmd x%x " 3052 "0442 Adapter failed to init, mbxCmd x%x "
@@ -3144,17 +3079,29 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3144 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3079 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3145 if (pmb->mb.un.varCfgPort.ginb) { 3080 if (pmb->mb.un.varCfgPort.ginb) {
3146 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3081 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3082 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3147 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3083 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3148 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy; 3084 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3149 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter; 3085 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3150 phba->inb_last_counter = 3086 phba->inb_last_counter =
3151 phba->mbox->us.s3_inb_pgp.counter; 3087 phba->mbox->us.s3_inb_pgp.counter;
3152 } else { 3088 } else {
3089 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3153 phba->port_gp = phba->mbox->us.s3_pgp.port; 3090 phba->port_gp = phba->mbox->us.s3_pgp.port;
3154 phba->inb_ha_copy = NULL; 3091 phba->inb_ha_copy = NULL;
3155 phba->inb_counter = NULL; 3092 phba->inb_counter = NULL;
3156 } 3093 }
3094
3095 if (phba->cfg_enable_bg) {
3096 if (pmb->mb.un.varCfgPort.gbg)
3097 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3098 else
3099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3100 "0443 Adapter did not grant "
3101 "BlockGuard\n");
3102 }
3157 } else { 3103 } else {
3104 phba->hbq_get = NULL;
3158 phba->port_gp = phba->mbox->us.s2.port; 3105 phba->port_gp = phba->mbox->us.s2.port;
3159 phba->inb_ha_copy = NULL; 3106 phba->inb_ha_copy = NULL;
3160 phba->inb_counter = NULL; 3107 phba->inb_counter = NULL;
@@ -3305,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3305 struct lpfc_sli *psli = &phba->sli; 3252 struct lpfc_sli *psli = &phba->sli;
3306 struct lpfc_sli_ring *pring; 3253 struct lpfc_sli_ring *pring;
3307 3254
3308 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
3309 return;
3310 }
3311
3312 /* Mbox cmd <mbxCommand> timeout */ 3255 /* Mbox cmd <mbxCommand> timeout */
3313 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 3256 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3314 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 3257 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -4005,7 +3948,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
4005 shost = lpfc_shost_from_vport(phba->pport); 3948 shost = lpfc_shost_from_vport(phba->pport);
4006 fc_host_post_vendor_event(shost, fc_get_event_number(), 3949 fc_host_post_vendor_event(shost, fc_get_event_number(),
4007 sizeof(temp_event_data), (char *) &temp_event_data, 3950 sizeof(temp_event_data), (char *) &temp_event_data,
4008 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 3951 LPFC_NL_VENDOR_ID);
4009 3952
4010} 3953}
4011 3954
@@ -5184,6 +5127,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5184{ 5127{
5185 uint32_t ha_copy; 5128 uint32_t ha_copy;
5186 5129
5130 /* If PCI channel is offline, don't process it */
5131 if (unlikely(pci_channel_offline(phba->pcidev)))
5132 return 0;
5133
5187 /* If somebody is waiting to handle an eratt, don't process it 5134 /* If somebody is waiting to handle an eratt, don't process it
5188 * here. The brdkill function will do this. 5135 * here. The brdkill function will do this.
5189 */ 5136 */
@@ -5242,6 +5189,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5242 uint32_t ha_copy; 5189 uint32_t ha_copy;
5243 uint32_t work_ha_copy; 5190 uint32_t work_ha_copy;
5244 unsigned long status; 5191 unsigned long status;
5192 unsigned long iflag;
5245 uint32_t control; 5193 uint32_t control;
5246 5194
5247 MAILBOX_t *mbox, *pmbox; 5195 MAILBOX_t *mbox, *pmbox;
@@ -5274,7 +5222,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5274 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 5222 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5275 return IRQ_NONE; 5223 return IRQ_NONE;
5276 /* Need to read HA REG for slow-path events */ 5224 /* Need to read HA REG for slow-path events */
5277 spin_lock(&phba->hbalock); 5225 spin_lock_irqsave(&phba->hbalock, iflag);
5278 ha_copy = readl(phba->HAregaddr); 5226 ha_copy = readl(phba->HAregaddr);
5279 /* If somebody is waiting to handle an eratt don't process it 5227 /* If somebody is waiting to handle an eratt don't process it
5280 * here. The brdkill function will do this. 5228 * here. The brdkill function will do this.
@@ -5294,7 +5242,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5294 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 5242 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
5295 phba->HAregaddr); 5243 phba->HAregaddr);
5296 readl(phba->HAregaddr); /* flush */ 5244 readl(phba->HAregaddr); /* flush */
5297 spin_unlock(&phba->hbalock); 5245 spin_unlock_irqrestore(&phba->hbalock, iflag);
5298 } else 5246 } else
5299 ha_copy = phba->ha_copy; 5247 ha_copy = phba->ha_copy;
5300 5248
@@ -5307,13 +5255,13 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5307 * Turn off Link Attention interrupts 5255 * Turn off Link Attention interrupts
5308 * until CLEAR_LA done 5256 * until CLEAR_LA done
5309 */ 5257 */
5310 spin_lock(&phba->hbalock); 5258 spin_lock_irqsave(&phba->hbalock, iflag);
5311 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 5259 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
5312 control = readl(phba->HCregaddr); 5260 control = readl(phba->HCregaddr);
5313 control &= ~HC_LAINT_ENA; 5261 control &= ~HC_LAINT_ENA;
5314 writel(control, phba->HCregaddr); 5262 writel(control, phba->HCregaddr);
5315 readl(phba->HCregaddr); /* flush */ 5263 readl(phba->HCregaddr); /* flush */
5316 spin_unlock(&phba->hbalock); 5264 spin_unlock_irqrestore(&phba->hbalock, iflag);
5317 } 5265 }
5318 else 5266 else
5319 work_ha_copy &= ~HA_LATT; 5267 work_ha_copy &= ~HA_LATT;
@@ -5328,7 +5276,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5328 (HA_RXMASK << (4*LPFC_ELS_RING))); 5276 (HA_RXMASK << (4*LPFC_ELS_RING)));
5329 status >>= (4*LPFC_ELS_RING); 5277 status >>= (4*LPFC_ELS_RING);
5330 if (status & HA_RXMASK) { 5278 if (status & HA_RXMASK) {
5331 spin_lock(&phba->hbalock); 5279 spin_lock_irqsave(&phba->hbalock, iflag);
5332 control = readl(phba->HCregaddr); 5280 control = readl(phba->HCregaddr);
5333 5281
5334 lpfc_debugfs_slow_ring_trc(phba, 5282 lpfc_debugfs_slow_ring_trc(phba,
@@ -5357,10 +5305,10 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5357 (uint32_t)((unsigned long) 5305 (uint32_t)((unsigned long)
5358 &phba->work_waitq)); 5306 &phba->work_waitq));
5359 } 5307 }
5360 spin_unlock(&phba->hbalock); 5308 spin_unlock_irqrestore(&phba->hbalock, iflag);
5361 } 5309 }
5362 } 5310 }
5363 spin_lock(&phba->hbalock); 5311 spin_lock_irqsave(&phba->hbalock, iflag);
5364 if (work_ha_copy & HA_ERATT) 5312 if (work_ha_copy & HA_ERATT)
5365 lpfc_sli_read_hs(phba); 5313 lpfc_sli_read_hs(phba);
5366 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 5314 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
@@ -5372,7 +5320,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5372 /* First check out the status word */ 5320 /* First check out the status word */
5373 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 5321 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
5374 if (pmbox->mbxOwner != OWN_HOST) { 5322 if (pmbox->mbxOwner != OWN_HOST) {
5375 spin_unlock(&phba->hbalock); 5323 spin_unlock_irqrestore(&phba->hbalock, iflag);
5376 /* 5324 /*
5377 * Stray Mailbox Interrupt, mbxCommand <cmd> 5325 * Stray Mailbox Interrupt, mbxCommand <cmd>
5378 * mbxStatus <status> 5326 * mbxStatus <status>
@@ -5389,7 +5337,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5389 work_ha_copy &= ~HA_MBATT; 5337 work_ha_copy &= ~HA_MBATT;
5390 } else { 5338 } else {
5391 phba->sli.mbox_active = NULL; 5339 phba->sli.mbox_active = NULL;
5392 spin_unlock(&phba->hbalock); 5340 spin_unlock_irqrestore(&phba->hbalock, iflag);
5393 phba->last_completion_time = jiffies; 5341 phba->last_completion_time = jiffies;
5394 del_timer(&phba->sli.mbox_tmo); 5342 del_timer(&phba->sli.mbox_tmo);
5395 if (pmb->mbox_cmpl) { 5343 if (pmb->mbox_cmpl) {
@@ -5438,14 +5386,18 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5438 goto send_current_mbox; 5386 goto send_current_mbox;
5439 } 5387 }
5440 } 5388 }
5441 spin_lock(&phba->pport->work_port_lock); 5389 spin_lock_irqsave(
5390 &phba->pport->work_port_lock,
5391 iflag);
5442 phba->pport->work_port_events &= 5392 phba->pport->work_port_events &=
5443 ~WORKER_MBOX_TMO; 5393 ~WORKER_MBOX_TMO;
5444 spin_unlock(&phba->pport->work_port_lock); 5394 spin_unlock_irqrestore(
5395 &phba->pport->work_port_lock,
5396 iflag);
5445 lpfc_mbox_cmpl_put(phba, pmb); 5397 lpfc_mbox_cmpl_put(phba, pmb);
5446 } 5398 }
5447 } else 5399 } else
5448 spin_unlock(&phba->hbalock); 5400 spin_unlock_irqrestore(&phba->hbalock, iflag);
5449 5401
5450 if ((work_ha_copy & HA_MBATT) && 5402 if ((work_ha_copy & HA_MBATT) &&
5451 (phba->sli.mbox_active == NULL)) { 5403 (phba->sli.mbox_active == NULL)) {
@@ -5461,9 +5413,9 @@ send_current_mbox:
5461 "MBX_SUCCESS"); 5413 "MBX_SUCCESS");
5462 } 5414 }
5463 5415
5464 spin_lock(&phba->hbalock); 5416 spin_lock_irqsave(&phba->hbalock, iflag);
5465 phba->work_ha |= work_ha_copy; 5417 phba->work_ha |= work_ha_copy;
5466 spin_unlock(&phba->hbalock); 5418 spin_unlock_irqrestore(&phba->hbalock, iflag);
5467 lpfc_worker_wake_up(phba); 5419 lpfc_worker_wake_up(phba);
5468 } 5420 }
5469 return IRQ_HANDLED; 5421 return IRQ_HANDLED;
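The conversions in this handler from plain spin_lock() to spin_lock_irqsave() follow the usual rule: a lock that can be taken from both hard-irq and non-irq context must save and restore the interrupt state so that nested acquisition does not re-enable interrupts early. A generic sketch of the pattern:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_shared_state;

static void example_update_from_any_context(unsigned long val)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_shared_state |= val;	/* also touched from the ISR */
	spin_unlock_irqrestore(&example_lock, flags);
}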
@@ -5495,6 +5447,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5495 struct lpfc_hba *phba; 5447 struct lpfc_hba *phba;
5496 uint32_t ha_copy; 5448 uint32_t ha_copy;
5497 unsigned long status; 5449 unsigned long status;
5450 unsigned long iflag;
5498 5451
5499 /* Get the driver's phba structure from the dev_id and 5452 /* Get the driver's phba structure from the dev_id and
5500 * assume the HBA is not interrupting. 5453 * assume the HBA is not interrupting.
@@ -5520,11 +5473,11 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5520 /* Need to read HA REG for FCP ring and other ring events */ 5473 /* Need to read HA REG for FCP ring and other ring events */
5521 ha_copy = readl(phba->HAregaddr); 5474 ha_copy = readl(phba->HAregaddr);
5522 /* Clear up only attention source related to fast-path */ 5475 /* Clear up only attention source related to fast-path */
5523 spin_lock(&phba->hbalock); 5476 spin_lock_irqsave(&phba->hbalock, iflag);
5524 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 5477 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
5525 phba->HAregaddr); 5478 phba->HAregaddr);
5526 readl(phba->HAregaddr); /* flush */ 5479 readl(phba->HAregaddr); /* flush */
5527 spin_unlock(&phba->hbalock); 5480 spin_unlock_irqrestore(&phba->hbalock, iflag);
5528 } else 5481 } else
5529 ha_copy = phba->ha_copy; 5482 ha_copy = phba->ha_copy;
5530 5483
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index cc43e9de22cc..7e32e95c5392 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.8" 21#define LPFC_DRIVER_VERSION "8.3.0"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a7de1cc02b40..63b54c66756c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -288,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
288 int vpi; 288 int vpi;
289 int rc = VPORT_ERROR; 289 int rc = VPORT_ERROR;
290 int status; 290 int status;
291 int size;
292 291
293 if ((phba->sli_rev < 3) || 292 if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
294 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
295 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 293 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
296 "1808 Create VPORT failed: " 294 "1808 Create VPORT failed: "
297 "NPIV is not enabled: SLImode:%d\n", 295 "NPIV is not enabled: SLImode:%d\n",
@@ -351,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
351 349
352 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 350 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
353 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 351 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
354 size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
355 if (size) {
356 vport->vname = kzalloc(size+1, GFP_KERNEL);
357 if (!vport->vname) {
358 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
359 "1814 Create VPORT failed. "
360 "vname allocation failed.\n");
361 rc = VPORT_ERROR;
362 lpfc_free_vpi(phba, vpi);
363 destroy_port(vport);
364 goto error_out;
365 }
366 memcpy(vport->vname, fc_vport->symbolic_name, size+1);
367 }
368 if (fc_vport->node_name != 0) 352 if (fc_vport->node_name != 0)
369 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); 353 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
370 if (fc_vport->port_name != 0) 354 if (fc_vport->port_name != 0)
@@ -394,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
394 goto error_out; 378 goto error_out;
395 } 379 }
396 380
381 /* Create binary sysfs attribute for vport */
382 lpfc_alloc_sysfs_attr(vport);
383
397 *(struct lpfc_vport **)fc_vport->dd_data = vport; 384 *(struct lpfc_vport **)fc_vport->dd_data = vport;
398 vport->fc_vport = fc_vport; 385 vport->fc_vport = fc_vport;
399 386
@@ -405,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
405 } 392 }
406 393
407 if (disable) { 394 if (disable) {
395 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
408 rc = VPORT_OK; 396 rc = VPORT_OK;
409 goto out; 397 goto out;
410 } 398 }
@@ -587,8 +575,12 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
587 spin_lock_irq(&phba->hbalock); 575 spin_lock_irq(&phba->hbalock);
588 vport->load_flag |= FC_UNLOADING; 576 vport->load_flag |= FC_UNLOADING;
589 spin_unlock_irq(&phba->hbalock); 577 spin_unlock_irq(&phba->hbalock);
590 kfree(vport->vname); 578
579 lpfc_free_sysfs_attr(vport);
580
591 lpfc_debugfs_terminate(vport); 581 lpfc_debugfs_terminate(vport);
582
583 /* Remove FC host and then SCSI host with the vport */
592 fc_remove_host(lpfc_shost_from_vport(vport)); 584 fc_remove_host(lpfc_shost_from_vport(vport));
593 scsi_remove_host(lpfc_shost_from_vport(vport)); 585 scsi_remove_host(lpfc_shost_from_vport(vport));
594 586
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 887682a24e36..c24e86f07804 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -53,7 +53,8 @@ struct mac_esp_priv {
53 void __iomem *pdma_io; 53 void __iomem *pdma_io;
54 int error; 54 int error;
55}; 55};
56static struct platform_device *internal_esp, *external_esp; 56static struct platform_device *internal_pdev, *external_pdev;
57static struct esp *esp_chips[2];
57 58
58#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 59#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
59 platform_get_drvdata((struct platform_device *) \ 60 platform_get_drvdata((struct platform_device *) \
@@ -170,7 +171,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
170 171
171#define MAC_ESP_PDMA_LOOP(operands) \ 172#define MAC_ESP_PDMA_LOOP(operands) \
172 asm volatile ( \ 173 asm volatile ( \
173 " tstw %2 \n" \ 174 " tstw %1 \n" \
174 " jbeq 20f \n" \ 175 " jbeq 20f \n" \
175 "1: movew " operands " \n" \ 176 "1: movew " operands " \n" \
176 "2: movew " operands " \n" \ 177 "2: movew " operands " \n" \
@@ -188,14 +189,14 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
188 "14: movew " operands " \n" \ 189 "14: movew " operands " \n" \
189 "15: movew " operands " \n" \ 190 "15: movew " operands " \n" \
190 "16: movew " operands " \n" \ 191 "16: movew " operands " \n" \
191 " subqw #1,%2 \n" \ 192 " subqw #1,%1 \n" \
192 " jbne 1b \n" \ 193 " jbne 1b \n" \
193 "20: tstw %3 \n" \ 194 "20: tstw %2 \n" \
194 " jbeq 30f \n" \ 195 " jbeq 30f \n" \
195 "21: movew " operands " \n" \ 196 "21: movew " operands " \n" \
196 " subqw #1,%3 \n" \ 197 " subqw #1,%2 \n" \
197 " jbne 21b \n" \ 198 " jbne 21b \n" \
198 "30: tstw %4 \n" \ 199 "30: tstw %3 \n" \
199 " jbeq 40f \n" \ 200 " jbeq 40f \n" \
200 "31: moveb " operands " \n" \ 201 "31: moveb " operands " \n" \
201 "32: nop \n" \ 202 "32: nop \n" \
@@ -223,8 +224,8 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
223 " .long 31b,40b \n" \ 224 " .long 31b,40b \n" \
224 " .long 32b,40b \n" \ 225 " .long 32b,40b \n" \
225 " .previous \n" \ 226 " .previous \n" \
226 : "+a" (addr) \ 227 : "+a" (addr), "+r" (count32), "+r" (count2) \
227 : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count)) 228 : "g" (count1), "a" (mep->pdma_io))
228 229
229static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, 230static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
230 u32 dma_count, int write, u8 cmd) 231 u32 dma_count, int write, u8 cmd)
@@ -247,19 +248,20 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
247 do { 248 do {
248 unsigned int count32 = esp_count >> 5; 249 unsigned int count32 = esp_count >> 5;
249 unsigned int count2 = (esp_count & 0x1F) >> 1; 250 unsigned int count2 = (esp_count & 0x1F) >> 1;
251 unsigned int count1 = esp_count & 1;
250 unsigned int start_addr = addr; 252 unsigned int start_addr = addr;
251 253
252 if (mac_esp_wait_for_dreq(esp)) 254 if (mac_esp_wait_for_dreq(esp))
253 break; 255 break;
254 256
255 if (write) { 257 if (write) {
256 MAC_ESP_PDMA_LOOP("%1@,%0@+"); 258 MAC_ESP_PDMA_LOOP("%4@,%0@+");
257 259
258 esp_count -= addr - start_addr; 260 esp_count -= addr - start_addr;
259 } else { 261 } else {
260 unsigned int n; 262 unsigned int n;
261 263
262 MAC_ESP_PDMA_LOOP("%0@+,%1@"); 264 MAC_ESP_PDMA_LOOP("%0@+,%4@");
263 265
264 if (mac_esp_wait_for_empty_fifo(esp)) 266 if (mac_esp_wait_for_empty_fifo(esp))
265 break; 267 break;
@@ -442,6 +444,32 @@ static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
442 return dma_len > 0xFFFF ? 0xFFFF : dma_len; 444 return dma_len > 0xFFFF ? 0xFFFF : dma_len;
443} 445}
444 446
447static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
448{
449 int got_intr;
450
451 /*
452 * This is an edge triggered IRQ, so we have to be careful to
453 * avoid missing a transition when it is shared by two ESP devices.
454 */
455
456 do {
457 got_intr = 0;
458 if (esp_chips[0] &&
459 (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
460 (void)scsi_esp_intr(irq, esp_chips[0]);
461 got_intr = 1;
462 }
463 if (esp_chips[1] &&
464 (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
465 (void)scsi_esp_intr(irq, esp_chips[1]);
466 got_intr = 1;
467 }
468 } while (got_intr);
469
470 return IRQ_HANDLED;
471}
472
445static struct esp_driver_ops mac_esp_ops = { 473static struct esp_driver_ops mac_esp_ops = {
446 .esp_write8 = mac_esp_write8, 474 .esp_write8 = mac_esp_write8,
447 .esp_read8 = mac_esp_read8, 475 .esp_read8 = mac_esp_read8,
@@ -556,10 +584,16 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
556 } 584 }
557 585
558 host->irq = IRQ_MAC_SCSI; 586 host->irq = IRQ_MAC_SCSI;
559 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP", 587 esp_chips[dev->id] = esp;
560 esp); 588 mb();
561 if (err < 0) 589 if (esp_chips[!dev->id] == NULL) {
562 goto fail_free_priv; 590 err = request_irq(host->irq, mac_scsi_esp_intr, 0,
591 "Mac ESP", NULL);
592 if (err < 0) {
593 esp_chips[dev->id] = NULL;
594 goto fail_free_priv;
595 }
596 }
563 597
564 err = scsi_esp_register(esp, &dev->dev); 598 err = scsi_esp_register(esp, &dev->dev);
565 if (err) 599 if (err)
@@ -568,7 +602,8 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
568 return 0; 602 return 0;
569 603
570fail_free_irq: 604fail_free_irq:
571 free_irq(host->irq, esp); 605 if (esp_chips[!dev->id] == NULL)
606 free_irq(host->irq, esp);
572fail_free_priv: 607fail_free_priv:
573 kfree(mep); 608 kfree(mep);
574fail_free_command_block: 609fail_free_command_block:
@@ -587,7 +622,9 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
587 622
588 scsi_esp_unregister(esp); 623 scsi_esp_unregister(esp);
589 624
590 free_irq(irq, esp); 625 esp_chips[dev->id] = NULL;
626 if (!(esp_chips[0] || esp_chips[1]))
627 free_irq(irq, NULL);
591 628
592 kfree(mep); 629 kfree(mep);
593 630
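
Annotation: taken together, the probe and remove hunks turn the two esp_chips[] slots into an implicit reference count on the IRQ. Probe publishes its slot first (the mb() orders the store before the sibling check), and only the device that finds the other slot empty requests the line; remove clears its slot, and the last device out frees it. A condensed sketch of that pairing, with hypothetical wrapper names but the same logic:

	static int grab_shared_irq(int irq, int id, struct esp *esp)
	{
		esp_chips[id] = esp;
		mb();				/* publish before checking sibling */
		if (esp_chips[!id] == NULL)	/* first device in owns the line */
			return request_irq(irq, edge_poll_intr, 0,
					   "Mac ESP", NULL);
		return 0;			/* sibling already holds it */
	}

	static void drop_shared_irq(int irq, int id)
	{
		esp_chips[id] = NULL;
		if (!(esp_chips[0] || esp_chips[1]))	/* last device out */
			free_irq(irq, NULL);
	}

Note the dev_id passed to request_irq() is NULL rather than a chip pointer: the handler no longer belongs to either device, it services whichever chip is asserting.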
@@ -614,19 +651,18 @@ static int __init mac_esp_init(void)
614 if (err) 651 if (err)
615 return err; 652 return err;
616 653
617 internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0); 654 internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
618 if (internal_esp && platform_device_add(internal_esp)) { 655 if (internal_pdev && platform_device_add(internal_pdev)) {
619 platform_device_put(internal_esp); 656 platform_device_put(internal_pdev);
620 internal_esp = NULL; 657 internal_pdev = NULL;
621 } 658 }
622 659 external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
623 external_esp = platform_device_alloc(DRV_MODULE_NAME, 1); 660 if (external_pdev && platform_device_add(external_pdev)) {
624 if (external_esp && platform_device_add(external_esp)) { 661 platform_device_put(external_pdev);
625 platform_device_put(external_esp); 662 external_pdev = NULL;
626 external_esp = NULL;
627 } 663 }
628 664
629 if (internal_esp || external_esp) { 665 if (internal_pdev || external_pdev) {
630 return 0; 666 return 0;
631 } else { 667 } else {
632 platform_driver_unregister(&esp_mac_driver); 668 platform_driver_unregister(&esp_mac_driver);
@@ -638,13 +674,13 @@ static void __exit mac_esp_exit(void)
638{ 674{
639 platform_driver_unregister(&esp_mac_driver); 675 platform_driver_unregister(&esp_mac_driver);
640 676
641 if (internal_esp) { 677 if (internal_pdev) {
642 platform_device_unregister(internal_esp); 678 platform_device_unregister(internal_pdev);
643 internal_esp = NULL; 679 internal_pdev = NULL;
644 } 680 }
645 if (external_esp) { 681 if (external_pdev) {
646 platform_device_unregister(external_esp); 682 platform_device_unregister(external_pdev);
647 external_esp = NULL; 683 external_pdev = NULL;
648 } 684 }
649} 685}
650 686
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 22052bb7becb..d06ec5aa6924 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -3401,8 +3401,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3401 data->IrqNumber = pdev->irq; 3401 data->IrqNumber = pdev->irq;
3402 data->BaseAddress = pci_resource_start(pdev, 0); 3402 data->BaseAddress = pci_resource_start(pdev, 0);
3403 data->NumAddress = pci_resource_len (pdev, 0); 3403 data->NumAddress = pci_resource_len (pdev, 0);
3404 data->MmioAddress = ioremap_nocache(pci_resource_start(pdev, 1), 3404 data->MmioAddress = pci_ioremap_bar(pdev, 1);
3405 pci_resource_len (pdev, 1));
3406 data->MmioLength = pci_resource_len (pdev, 1); 3405 data->MmioLength = pci_resource_len (pdev, 1);
3407 3406
3408 pci_set_master(pdev); 3407 pci_set_master(pdev);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b6cd12b2e996..8cb9240596ab 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4294,8 +4294,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4294 error = -ENODEV; 4294 error = -ENODEV;
4295 4295
4296#if MEMORY_MAPPED_IO 4296#if MEMORY_MAPPED_IO
4297 ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1), 4297 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4298 pci_resource_len(ha->pdev, 1));
4299 if (!ha->mmpbase) { 4298 if (!ha->mmpbase) {
4300 printk(KERN_INFO "qla1280: Unable to map I/O memory\n"); 4299 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4301 goto error_free_response_ring; 4300 goto error_free_response_ring;
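
Annotation: the nsp32 and qla1280 hunks above are the same mechanical conversion. pci_ioremap_bar(pdev, bar) folds the ioremap(pci_resource_start(), pci_resource_len()) pair into one call, removing the duplicated BAR index and returning NULL (with a warning) if the BAR is not a memory resource. The shape of the conversion:

	#include <linux/pci.h>

	/* before: two calls that must agree on the BAR index */
	mmio = ioremap_nocache(pci_resource_start(pdev, 1),
			       pci_resource_len(pdev, 1));

	/* after: one call, with an IORESOURCE_MEM sanity check built in */
	mmio = pci_ioremap_bar(pdev, 1);

qla1280 used plain ioremap(); for PCI memory-mapped register BARs the helper's uncached mapping is the intended semantics in both drivers, which is why a single call can replace both variants.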
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ed731968f15f..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
19 struct bin_attribute *bin_attr, 19 struct bin_attribute *bin_attr,
20 char *buf, loff_t off, size_t count) 20 char *buf, loff_t off, size_t count)
21{ 21{
22 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 22 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
23 struct device, kobj))); 23 struct device, kobj)));
24 struct qla_hw_data *ha = vha->hw;
24 25
25 if (ha->fw_dump_reading == 0) 26 if (ha->fw_dump_reading == 0)
26 return 0; 27 return 0;
@@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
34 struct bin_attribute *bin_attr, 35 struct bin_attribute *bin_attr,
35 char *buf, loff_t off, size_t count) 36 char *buf, loff_t off, size_t count)
36{ 37{
37 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 38 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
38 struct device, kobj))); 39 struct device, kobj)));
40 struct qla_hw_data *ha = vha->hw;
39 int reading; 41 int reading;
40 42
41 if (off != 0) 43 if (off != 0)
@@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
48 break; 50 break;
49 51
50 qla_printk(KERN_INFO, ha, 52 qla_printk(KERN_INFO, ha,
51 "Firmware dump cleared on (%ld).\n", ha->host_no); 53 "Firmware dump cleared on (%ld).\n", vha->host_no);
52 54
53 ha->fw_dump_reading = 0; 55 ha->fw_dump_reading = 0;
54 ha->fw_dumped = 0; 56 ha->fw_dumped = 0;
@@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
59 61
60 qla_printk(KERN_INFO, ha, 62 qla_printk(KERN_INFO, ha,
61 "Raw firmware dump ready for read on (%ld).\n", 63 "Raw firmware dump ready for read on (%ld).\n",
62 ha->host_no); 64 vha->host_no);
63 } 65 }
64 break; 66 break;
65 case 2: 67 case 2:
66 qla2x00_alloc_fw_dump(ha); 68 qla2x00_alloc_fw_dump(vha);
67 break; 69 break;
68 case 3: 70 case 3:
69 qla2x00_system_error(ha); 71 qla2x00_system_error(vha);
70 break; 72 break;
71 } 73 }
72 return (count); 74 return (count);
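
Annotation: nearly all of the qla_attr.c churn that follows is one mechanical pattern, worth stating once. shost_priv() now returns the per-(virtual-)port scsi_qla_host (vha), and adapter-wide state has moved into struct qla_hw_data, reached as vha->hw. Per-port fields (host_no, loop_state, dpc_flags) stay on vha; hardware fields (nvram, optrom, isp_ops, board revisions) come from ha; and the isp_ops callbacks now take the vha. A template for any attribute converted this way (the attribute itself is hypothetical, the accessors are those used in the hunks):

	static ssize_t
	qla2x00_example_show(struct device *dev, struct device_attribute *attr,
	    char *buf)
	{
		scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
		struct qla_hw_data *ha = vha->hw;	/* shared physical HBA */

		return snprintf(buf, PAGE_SIZE, "%ld %d\n",
		    vha->host_no, ha->zio_mode);	/* per-port vs. per-HBA */
	}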
@@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
87 struct bin_attribute *bin_attr, 89 struct bin_attribute *bin_attr,
88 char *buf, loff_t off, size_t count) 90 char *buf, loff_t off, size_t count)
89{ 91{
90 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 92 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
91 struct device, kobj))); 93 struct device, kobj)));
94 struct qla_hw_data *ha = vha->hw;
92 95
93 if (!capable(CAP_SYS_ADMIN)) 96 if (!capable(CAP_SYS_ADMIN))
94 return 0; 97 return 0;
@@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
103 struct bin_attribute *bin_attr, 106 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count) 107 char *buf, loff_t off, size_t count)
105{ 108{
106 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 109 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
107 struct device, kobj))); 110 struct device, kobj)));
111 struct qla_hw_data *ha = vha->hw;
108 uint16_t cnt; 112 uint16_t cnt;
109 113
110 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 114 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
134 } 138 }
135 139
136 /* Write NVRAM. */ 140 /* Write NVRAM. */
137 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 141 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
138 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base, 142 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
139 count); 143 count);
140 144
141 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 145 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
142 146
143 return (count); 147 return (count);
144} 148}
@@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
158 struct bin_attribute *bin_attr, 162 struct bin_attribute *bin_attr,
159 char *buf, loff_t off, size_t count) 163 char *buf, loff_t off, size_t count)
160{ 164{
161 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 165 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
162 struct device, kobj))); 166 struct device, kobj)));
167 struct qla_hw_data *ha = vha->hw;
163 168
164 if (ha->optrom_state != QLA_SREADING) 169 if (ha->optrom_state != QLA_SREADING)
165 return 0; 170 return 0;
@@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
173 struct bin_attribute *bin_attr, 178 struct bin_attribute *bin_attr,
174 char *buf, loff_t off, size_t count) 179 char *buf, loff_t off, size_t count)
175{ 180{
176 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 181 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
177 struct device, kobj))); 182 struct device, kobj)));
183 struct qla_hw_data *ha = vha->hw;
178 184
179 if (ha->optrom_state != QLA_SWRITING) 185 if (ha->optrom_state != QLA_SWRITING)
180 return -EINVAL; 186 return -EINVAL;
@@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
203 struct bin_attribute *bin_attr, 209 struct bin_attribute *bin_attr,
204 char *buf, loff_t off, size_t count) 210 char *buf, loff_t off, size_t count)
205{ 211{
206 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 212 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
207 struct device, kobj))); 213 struct device, kobj)));
214 struct qla_hw_data *ha = vha->hw;
215
208 uint32_t start = 0; 216 uint32_t start = 0;
209 uint32_t size = ha->optrom_size; 217 uint32_t size = ha->optrom_size;
210 int val, valid; 218 int val, valid;
@@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
262 ha->optrom_region_start, ha->optrom_region_size)); 270 ha->optrom_region_start, ha->optrom_region_size));
263 271
264 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 272 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
265 ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 273 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
266 ha->optrom_region_start, ha->optrom_region_size); 274 ha->optrom_region_start, ha->optrom_region_size);
267 break; 275 break;
268 case 2: 276 case 2:
@@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
333 "Writing flash region -- 0x%x/0x%x.\n", 341 "Writing flash region -- 0x%x/0x%x.\n",
334 ha->optrom_region_start, ha->optrom_region_size)); 342 ha->optrom_region_start, ha->optrom_region_size));
335 343
336 ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 344 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
337 ha->optrom_region_start, ha->optrom_region_size); 345 ha->optrom_region_start, ha->optrom_region_size);
338 break; 346 break;
339 default: 347 default:
@@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
356 struct bin_attribute *bin_attr, 364 struct bin_attribute *bin_attr,
357 char *buf, loff_t off, size_t count) 365 char *buf, loff_t off, size_t count)
358{ 366{
359 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 367 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
360 struct device, kobj))); 368 struct device, kobj)));
369 struct qla_hw_data *ha = vha->hw;
361 370
362 if (!capable(CAP_SYS_ADMIN)) 371 if (!capable(CAP_SYS_ADMIN))
363 return 0; 372 return 0;
@@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
371 struct bin_attribute *bin_attr, 380 struct bin_attribute *bin_attr,
372 char *buf, loff_t off, size_t count) 381 char *buf, loff_t off, size_t count)
373{ 382{
374 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 383 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
375 struct device, kobj))); 384 struct device, kobj)));
385 struct qla_hw_data *ha = vha->hw;
376 386
377 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) 387 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
378 return 0; 388 return 0;
379 389
380 /* Write NVRAM. */ 390 /* Write NVRAM. */
381 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 391 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
382 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count); 392 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
383 393
384 return count; 394 return count;
385} 395}
@@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
399 struct bin_attribute *bin_attr, 409 struct bin_attribute *bin_attr,
400 char *buf, loff_t off, size_t count) 410 char *buf, loff_t off, size_t count)
401{ 411{
402 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 412 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
403 struct device, kobj))); 413 struct device, kobj)));
414 struct qla_hw_data *ha = vha->hw;
404 uint16_t iter, addr, offset; 415 uint16_t iter, addr, offset;
405 int rval; 416 int rval;
406 417
@@ -429,7 +440,7 @@ do_read:
429 offset = 0; 440 offset = 0;
430 } 441 }
431 442
432 rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset, 443 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
433 SFP_BLOCK_SIZE); 444 SFP_BLOCK_SIZE);
434 if (rval != QLA_SUCCESS) { 445 if (rval != QLA_SUCCESS) {
435 qla_printk(KERN_WARNING, ha, 446 qla_printk(KERN_WARNING, ha,
@@ -469,30 +480,31 @@ static struct sysfs_entry {
469}; 480};
470 481
471void 482void
472qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 483qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
473{ 484{
474 struct Scsi_Host *host = ha->host; 485 struct Scsi_Host *host = vha->host;
475 struct sysfs_entry *iter; 486 struct sysfs_entry *iter;
476 int ret; 487 int ret;
477 488
478 for (iter = bin_file_entries; iter->name; iter++) { 489 for (iter = bin_file_entries; iter->name; iter++) {
479 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 490 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
480 continue; 491 continue;
481 492
482 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 493 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
483 iter->attr); 494 iter->attr);
484 if (ret) 495 if (ret)
485 qla_printk(KERN_INFO, ha, 496 qla_printk(KERN_INFO, vha->hw,
486 "Unable to create sysfs %s binary attribute " 497 "Unable to create sysfs %s binary attribute "
487 "(%d).\n", iter->name, ret); 498 "(%d).\n", iter->name, ret);
488 } 499 }
489} 500}
490 501
491void 502void
492qla2x00_free_sysfs_attr(scsi_qla_host_t *ha) 503qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
493{ 504{
494 struct Scsi_Host *host = ha->host; 505 struct Scsi_Host *host = vha->host;
495 struct sysfs_entry *iter; 506 struct sysfs_entry *iter;
507 struct qla_hw_data *ha = vha->hw;
496 508
497 for (iter = bin_file_entries; iter->name; iter++) { 509 for (iter = bin_file_entries; iter->name; iter++) {
498 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 510 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
503 } 515 }
504 516
505 if (ha->beacon_blink_led == 1) 517 if (ha->beacon_blink_led == 1)
506 ha->isp_ops->beacon_off(ha); 518 ha->isp_ops->beacon_off(vha);
507} 519}
508 520
509/* Scsi_Host attributes. */ 521/* Scsi_Host attributes. */
@@ -519,22 +531,24 @@ static ssize_t
519qla2x00_fw_version_show(struct device *dev, 531qla2x00_fw_version_show(struct device *dev,
520 struct device_attribute *attr, char *buf) 532 struct device_attribute *attr, char *buf)
521{ 533{
522 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 534 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
523 char fw_str[30]; 535 struct qla_hw_data *ha = vha->hw;
536 char fw_str[128];
524 537
525 return snprintf(buf, PAGE_SIZE, "%s\n", 538 return snprintf(buf, PAGE_SIZE, "%s\n",
526 ha->isp_ops->fw_version_str(ha, fw_str)); 539 ha->isp_ops->fw_version_str(vha, fw_str));
527} 540}
528 541
529static ssize_t 542static ssize_t
530qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, 543qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
531 char *buf) 544 char *buf)
532{ 545{
533 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 546 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
547 struct qla_hw_data *ha = vha->hw;
534 uint32_t sn; 548 uint32_t sn;
535 549
536 if (IS_FWI2_CAPABLE(ha)) { 550 if (IS_FWI2_CAPABLE(ha)) {
537 qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE); 551 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
538 return snprintf(buf, PAGE_SIZE, "%s\n", buf); 552 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
539 } 553 }
540 554
@@ -547,15 +561,16 @@ static ssize_t
547qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, 561qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
548 char *buf) 562 char *buf)
549{ 563{
550 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 564 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
551 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device); 565 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
552} 566}
553 567
554static ssize_t 568static ssize_t
555qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, 569qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
556 char *buf) 570 char *buf)
557{ 571{
558 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 572 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
573 struct qla_hw_data *ha = vha->hw;
559 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 574 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
560 ha->product_id[0], ha->product_id[1], ha->product_id[2], 575 ha->product_id[0], ha->product_id[1], ha->product_id[2],
561 ha->product_id[3]); 576 ha->product_id[3]);
@@ -565,43 +580,44 @@ static ssize_t
565qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, 580qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
566 char *buf) 581 char *buf)
567{ 582{
568 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 583 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
569 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number); 584 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
570} 585}
571 586
572static ssize_t 587static ssize_t
573qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 588qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
574 char *buf) 589 char *buf)
575{ 590{
576 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 591 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
577 return snprintf(buf, PAGE_SIZE, "%s\n", 592 return snprintf(buf, PAGE_SIZE, "%s\n",
578 ha->model_desc ? ha->model_desc: ""); 593 vha->hw->model_desc ? vha->hw->model_desc : "");
579} 594}
580 595
581static ssize_t 596static ssize_t
582qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 597qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
583 char *buf) 598 char *buf)
584{ 599{
585 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 600 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
586 char pci_info[30]; 601 char pci_info[30];
587 602
588 return snprintf(buf, PAGE_SIZE, "%s\n", 603 return snprintf(buf, PAGE_SIZE, "%s\n",
589 ha->isp_ops->pci_info_str(ha, pci_info)); 604 vha->hw->isp_ops->pci_info_str(vha, pci_info));
590} 605}
591 606
592static ssize_t 607static ssize_t
593qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 608qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
594 char *buf) 609 char *buf)
595{ 610{
596 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 611 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
612 struct qla_hw_data *ha = vha->hw;
597 int len = 0; 613 int len = 0;
598 614
599 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 615 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
600 atomic_read(&ha->loop_state) == LOOP_DEAD) 616 atomic_read(&vha->loop_state) == LOOP_DEAD)
601 len = snprintf(buf, PAGE_SIZE, "Link Down\n"); 617 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
602 else if (atomic_read(&ha->loop_state) != LOOP_READY || 618 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
603 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || 619 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
604 test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) 620 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
605 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); 621 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
606 else { 622 else {
607 len = snprintf(buf, PAGE_SIZE, "Link Up - "); 623 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -632,10 +648,10 @@ static ssize_t
632qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 648qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
633 char *buf) 649 char *buf)
634{ 650{
635 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 651 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
636 int len = 0; 652 int len = 0;
637 653
638 switch (ha->zio_mode) { 654 switch (vha->hw->zio_mode) {
639 case QLA_ZIO_MODE_6: 655 case QLA_ZIO_MODE_6:
640 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 656 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
641 break; 657 break;
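
Annotation: the zio and beacon show routines use the standard idiom for building a sysfs page piecewise: offset both the buffer and the remaining size on every append, so the PAGE_SIZE bound holds however many fragments are written. In isolation:

	static ssize_t example_show(char *buf)
	{
		int len = 0;

		/* each append consumes part of the one-page sysfs buffer */
		len += snprintf(buf + len, PAGE_SIZE - len, "Mode 6\n");
		len += snprintf(buf + len, PAGE_SIZE - len, "timer %d us\n", 100);
		return len;
	}

One caveat with this pattern: snprintf() returns the length that would have been written, so on truncation len can exceed PAGE_SIZE. Later kernels prefer scnprintf(), which returns the number of bytes actually stored.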
@@ -650,7 +666,8 @@ static ssize_t
650qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 666qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
651 const char *buf, size_t count) 667 const char *buf, size_t count)
652{ 668{
653 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 669 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
670 struct qla_hw_data *ha = vha->hw;
654 int val = 0; 671 int val = 0;
655 uint16_t zio_mode; 672 uint16_t zio_mode;
656 673
@@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
668 /* Update per-hba values and queue a reset. */ 685 /* Update per-hba values and queue a reset. */
669 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 686 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
670 ha->zio_mode = zio_mode; 687 ha->zio_mode = zio_mode;
671 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
672 } 689 }
673 return strlen(buf); 690 return strlen(buf);
674} 691}
@@ -677,16 +694,16 @@ static ssize_t
677qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 694qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
678 char *buf) 695 char *buf)
679{ 696{
680 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 697 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
681 698
682 return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100); 699 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
683} 700}
684 701
685static ssize_t 702static ssize_t
686qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 703qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
687 const char *buf, size_t count) 704 const char *buf, size_t count)
688{ 705{
689 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 706 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
690 int val = 0; 707 int val = 0;
691 uint16_t zio_timer; 708 uint16_t zio_timer;
692 709
@@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
696 return -ERANGE; 713 return -ERANGE;
697 714
698 zio_timer = (uint16_t)(val / 100); 715 zio_timer = (uint16_t)(val / 100);
699 ha->zio_timer = zio_timer; 716 vha->hw->zio_timer = zio_timer;
700 717
701 return strlen(buf); 718 return strlen(buf);
702} 719}
@@ -705,10 +722,10 @@ static ssize_t
705qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 722qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
706 char *buf) 723 char *buf)
707{ 724{
708 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 725 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
709 int len = 0; 726 int len = 0;
710 727
711 if (ha->beacon_blink_led) 728 if (vha->hw->beacon_blink_led)
712 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 729 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
713 else 730 else
714 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 731 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@@ -719,14 +736,15 @@ static ssize_t
719qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 736qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
720 const char *buf, size_t count) 737 const char *buf, size_t count)
721{ 738{
722 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 739 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
740 struct qla_hw_data *ha = vha->hw;
723 int val = 0; 741 int val = 0;
724 int rval; 742 int rval;
725 743
726 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 744 if (IS_QLA2100(ha) || IS_QLA2200(ha))
727 return -EPERM; 745 return -EPERM;
728 746
729 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { 747 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
730 qla_printk(KERN_WARNING, ha, 748 qla_printk(KERN_WARNING, ha,
731 "Abort ISP active -- ignoring beacon request.\n"); 749 "Abort ISP active -- ignoring beacon request.\n");
732 return -EBUSY; 750 return -EBUSY;
@@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
736 return -EINVAL; 754 return -EINVAL;
737 755
738 if (val) 756 if (val)
739 rval = ha->isp_ops->beacon_on(ha); 757 rval = ha->isp_ops->beacon_on(vha);
740 else 758 else
741 rval = ha->isp_ops->beacon_off(ha); 759 rval = ha->isp_ops->beacon_off(vha);
742 760
743 if (rval != QLA_SUCCESS) 761 if (rval != QLA_SUCCESS)
744 count = 0; 762 count = 0;
@@ -750,8 +768,8 @@ static ssize_t
750qla2x00_optrom_bios_version_show(struct device *dev, 768qla2x00_optrom_bios_version_show(struct device *dev,
751 struct device_attribute *attr, char *buf) 769 struct device_attribute *attr, char *buf)
752{ 770{
753 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 771 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
754 772 struct qla_hw_data *ha = vha->hw;
755 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 773 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
756 ha->bios_revision[0]); 774 ha->bios_revision[0]);
757} 775}
@@ -760,8 +778,8 @@ static ssize_t
760qla2x00_optrom_efi_version_show(struct device *dev, 778qla2x00_optrom_efi_version_show(struct device *dev,
761 struct device_attribute *attr, char *buf) 779 struct device_attribute *attr, char *buf)
762{ 780{
763 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 781 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
764 782 struct qla_hw_data *ha = vha->hw;
765 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 783 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
766 ha->efi_revision[0]); 784 ha->efi_revision[0]);
767} 785}
@@ -770,8 +788,8 @@ static ssize_t
770qla2x00_optrom_fcode_version_show(struct device *dev, 788qla2x00_optrom_fcode_version_show(struct device *dev,
771 struct device_attribute *attr, char *buf) 789 struct device_attribute *attr, char *buf)
772{ 790{
773 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 791 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
774 792 struct qla_hw_data *ha = vha->hw;
775 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 793 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
776 ha->fcode_revision[0]); 794 ha->fcode_revision[0]);
777} 795}
@@ -780,8 +798,8 @@ static ssize_t
780qla2x00_optrom_fw_version_show(struct device *dev, 798qla2x00_optrom_fw_version_show(struct device *dev,
781 struct device_attribute *attr, char *buf) 799 struct device_attribute *attr, char *buf)
782{ 800{
783 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 801 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
784 802 struct qla_hw_data *ha = vha->hw;
785 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 803 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
786 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 804 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
787 ha->fw_revision[3]); 805 ha->fw_revision[3]);
@@ -791,8 +809,8 @@ static ssize_t
791qla2x00_total_isp_aborts_show(struct device *dev, 809qla2x00_total_isp_aborts_show(struct device *dev,
792 struct device_attribute *attr, char *buf) 810 struct device_attribute *attr, char *buf)
793{ 811{
794 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 812 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
795 813 struct qla_hw_data *ha = vha->hw;
796 return snprintf(buf, PAGE_SIZE, "%d\n", 814 return snprintf(buf, PAGE_SIZE, "%d\n",
797 ha->qla_stats.total_isp_aborts); 815 ha->qla_stats.total_isp_aborts);
798} 816}
@@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
848static void 866static void
849qla2x00_get_host_port_id(struct Scsi_Host *shost) 867qla2x00_get_host_port_id(struct Scsi_Host *shost)
850{ 868{
851 scsi_qla_host_t *ha = shost_priv(shost); 869 scsi_qla_host_t *vha = shost_priv(shost);
852 870
853 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 871 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
854 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 872 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
855} 873}
856 874
857static void 875static void
858qla2x00_get_host_speed(struct Scsi_Host *shost) 876qla2x00_get_host_speed(struct Scsi_Host *shost)
859{ 877{
860 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 878 struct qla_hw_data *ha = ((struct scsi_qla_host *)
879 (shost_priv(shost)))->hw;
861 u32 speed = FC_PORTSPEED_UNKNOWN; 880 u32 speed = FC_PORTSPEED_UNKNOWN;
862 881
863 switch (ha->link_data_rate) { 882 switch (ha->link_data_rate) {
@@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
880static void 899static void
881qla2x00_get_host_port_type(struct Scsi_Host *shost) 900qla2x00_get_host_port_type(struct Scsi_Host *shost)
882{ 901{
883 scsi_qla_host_t *ha = shost_priv(shost); 902 scsi_qla_host_t *vha = shost_priv(shost);
884 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 903 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
885 904
886 if (ha->parent) { 905 if (vha->vp_idx) {
887 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 906 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
888 return; 907 return;
889 } 908 }
890 switch (ha->current_topology) { 909 switch (vha->hw->current_topology) {
891 case ISP_CFG_NL: 910 case ISP_CFG_NL:
892 port_type = FC_PORTTYPE_LPORT; 911 port_type = FC_PORTTYPE_LPORT;
893 break; 912 break;
@@ -908,11 +927,11 @@ static void
908qla2x00_get_starget_node_name(struct scsi_target *starget) 927qla2x00_get_starget_node_name(struct scsi_target *starget)
909{ 928{
910 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
911 scsi_qla_host_t *ha = shost_priv(host); 930 scsi_qla_host_t *vha = shost_priv(host);
912 fc_port_t *fcport; 931 fc_port_t *fcport;
913 u64 node_name = 0; 932 u64 node_name = 0;
914 933
915 list_for_each_entry(fcport, &ha->fcports, list) { 934 list_for_each_entry(fcport, &vha->vp_fcports, list) {
916 if (fcport->rport && 935 if (fcport->rport &&
917 starget->id == fcport->rport->scsi_target_id) { 936 starget->id == fcport->rport->scsi_target_id) {
918 node_name = wwn_to_u64(fcport->node_name); 937 node_name = wwn_to_u64(fcport->node_name);
@@ -927,11 +946,11 @@ static void
927qla2x00_get_starget_port_name(struct scsi_target *starget) 946qla2x00_get_starget_port_name(struct scsi_target *starget)
928{ 947{
929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
930 scsi_qla_host_t *ha = shost_priv(host); 949 scsi_qla_host_t *vha = shost_priv(host);
931 fc_port_t *fcport; 950 fc_port_t *fcport;
932 u64 port_name = 0; 951 u64 port_name = 0;
933 952
934 list_for_each_entry(fcport, &ha->fcports, list) { 953 list_for_each_entry(fcport, &vha->vp_fcports, list) {
935 if (fcport->rport && 954 if (fcport->rport &&
936 starget->id == fcport->rport->scsi_target_id) { 955 starget->id == fcport->rport->scsi_target_id) {
937 port_name = wwn_to_u64(fcport->port_name); 956 port_name = wwn_to_u64(fcport->port_name);
@@ -946,11 +965,11 @@ static void
946qla2x00_get_starget_port_id(struct scsi_target *starget) 965qla2x00_get_starget_port_id(struct scsi_target *starget)
947{ 966{
948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 967 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
949 scsi_qla_host_t *ha = shost_priv(host); 968 scsi_qla_host_t *vha = shost_priv(host);
950 fc_port_t *fcport; 969 fc_port_t *fcport;
951 uint32_t port_id = ~0U; 970 uint32_t port_id = ~0U;
952 971
953 list_for_each_entry(fcport, &ha->fcports, list) { 972 list_for_each_entry(fcport, &vha->vp_fcports, list) {
954 if (fcport->rport && 973 if (fcport->rport &&
955 starget->id == fcport->rport->scsi_target_id) { 974 starget->id == fcport->rport->scsi_target_id) {
956 port_id = fcport->d_id.b.domain << 16 | 975 port_id = fcport->d_id.b.domain << 16 |
@@ -999,9 +1018,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
999 * final cleanup of firmware resources (PCBs and XCBs). 1018 * final cleanup of firmware resources (PCBs and XCBs).
1000 */ 1019 */
1001 if (fcport->loop_id != FC_NO_LOOP_ID) { 1020 if (fcport->loop_id != FC_NO_LOOP_ID) {
1002 fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id, 1021 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1003 fcport->d_id.b.domain, fcport->d_id.b.area, 1022 fcport->loop_id, fcport->d_id.b.domain,
1004 fcport->d_id.b.al_pa); 1023 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1005 fcport->loop_id = FC_NO_LOOP_ID; 1024 fcport->loop_id = FC_NO_LOOP_ID;
1006 } 1025 }
1007 1026
@@ -1011,16 +1030,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1011static int 1030static int
1012qla2x00_issue_lip(struct Scsi_Host *shost) 1031qla2x00_issue_lip(struct Scsi_Host *shost)
1013{ 1032{
1014 scsi_qla_host_t *ha = shost_priv(shost); 1033 scsi_qla_host_t *vha = shost_priv(shost);
1015 1034
1016 qla2x00_loop_reset(ha); 1035 qla2x00_loop_reset(vha);
1017 return 0; 1036 return 0;
1018} 1037}
1019 1038
1020static struct fc_host_statistics * 1039static struct fc_host_statistics *
1021qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 1040qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1022{ 1041{
1023 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1042 scsi_qla_host_t *vha = shost_priv(shost);
1043 struct qla_hw_data *ha = vha->hw;
1044 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1024 int rval; 1045 int rval;
1025 struct link_statistics *stats; 1046 struct link_statistics *stats;
1026 dma_addr_t stats_dma; 1047 dma_addr_t stats_dma;
@@ -1032,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1032 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1053 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1033 if (stats == NULL) { 1054 if (stats == NULL) {
1034 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1055 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1035 __func__, ha->host_no)); 1056 __func__, base_vha->host_no));
1036 goto done; 1057 goto done;
1037 } 1058 }
1038 memset(stats, 0, DMA_POOL_SIZE); 1059 memset(stats, 0, DMA_POOL_SIZE);
1039 1060
1040 rval = QLA_FUNCTION_FAILED; 1061 rval = QLA_FUNCTION_FAILED;
1041 if (IS_FWI2_CAPABLE(ha)) { 1062 if (IS_FWI2_CAPABLE(ha)) {
1042 rval = qla24xx_get_isp_stats(ha, stats, stats_dma); 1063 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1043 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 1064 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1044 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) && 1065 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1045 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) && 1066 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1046 !ha->dpc_active) { 1067 !ha->dpc_active) {
1047 /* Must be in a 'READY' state for statistics retrieval. */ 1068 /* Must be in a 'READY' state for statistics retrieval. */
1048 rval = qla2x00_get_link_status(ha, ha->loop_id, stats, 1069 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1049 stats_dma); 1070 stats, stats_dma);
1050 } 1071 }
1051 1072
1052 if (rval != QLA_SUCCESS) 1073 if (rval != QLA_SUCCESS)
@@ -1077,29 +1098,29 @@ done:
1077static void 1098static void
1078qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 1099qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1079{ 1100{
1080 scsi_qla_host_t *ha = shost_priv(shost); 1101 scsi_qla_host_t *vha = shost_priv(shost);
1081 1102
1082 qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost)); 1103 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1083} 1104}
1084 1105
1085static void 1106static void
1086qla2x00_set_host_system_hostname(struct Scsi_Host *shost) 1107qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1087{ 1108{
1088 scsi_qla_host_t *ha = shost_priv(shost); 1109 scsi_qla_host_t *vha = shost_priv(shost);
1089 1110
1090 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 1111 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1091} 1112}
1092 1113
1093static void 1114static void
1094qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1115qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1095{ 1116{
1096 scsi_qla_host_t *ha = shost_priv(shost); 1117 scsi_qla_host_t *vha = shost_priv(shost);
1097 u64 node_name; 1118 u64 node_name;
1098 1119
1099 if (ha->device_flags & SWITCH_FOUND) 1120 if (vha->device_flags & SWITCH_FOUND)
1100 node_name = wwn_to_u64(ha->fabric_node_name); 1121 node_name = wwn_to_u64(vha->fabric_node_name);
1101 else 1122 else
1102 node_name = wwn_to_u64(ha->node_name); 1123 node_name = wwn_to_u64(vha->node_name);
1103 1124
1104 fc_host_fabric_name(shost) = node_name; 1125 fc_host_fabric_name(shost) = node_name;
1105} 1126}
@@ -1107,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1107static void 1128static void
1108qla2x00_get_host_port_state(struct Scsi_Host *shost) 1129qla2x00_get_host_port_state(struct Scsi_Host *shost)
1109{ 1130{
1110 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1131 scsi_qla_host_t *vha = shost_priv(shost);
1132 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1111 1133
1112 if (!ha->flags.online) 1134 if (!base_vha->flags.online)
1113 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1135 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1114 else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT) 1136 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1115 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 1137 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1116 else 1138 else
1117 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 1139 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
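
Annotation: with to_qla_parent() gone, code that genuinely needs the physical port (a vport's link state is its parent's) recovers the base port from the PCI device's drvdata, which the base scsi_qla_host owns. The pattern used here and in qla2x00_get_fc_host_stats() above, written as a helper with a hypothetical name:

	/* the base (physical) port is the one that registered the PCI dev */
	static inline scsi_qla_host_t *base_port(scsi_qla_host_t *vha)
	{
		return pci_get_drvdata(vha->hw->pdev);
	}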
@@ -1121,8 +1143,11 @@ static int
1121qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1143qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1122{ 1144{
1123 int ret = 0; 1145 int ret = 0;
1124 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 1146 int cnt = 0;
1125 scsi_qla_host_t *vha; 1147 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1148 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1149 scsi_qla_host_t *vha = NULL;
1150 struct qla_hw_data *ha = base_vha->hw;
1126 1151
1127 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1152 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1128 if (ret) { 1153 if (ret) {
@@ -1144,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1144 atomic_set(&vha->vp_state, VP_FAILED); 1169 atomic_set(&vha->vp_state, VP_FAILED);
1145 1170
1146 /* ready to create vport */ 1171 /* ready to create vport */
1147 qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx); 1172 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1173 vha->vp_idx);
1148 1174
1149 /* initialized vport states */ 1175 /* initialized vport states */
1150 atomic_set(&vha->loop_state, LOOP_DOWN); 1176 atomic_set(&vha->loop_state, LOOP_DOWN);
1151 vha->vp_err_state= VP_ERR_PORTDWN; 1177 vha->vp_err_state= VP_ERR_PORTDWN;
1152 vha->vp_prev_err_state= VP_ERR_UNKWN; 1178 vha->vp_prev_err_state= VP_ERR_UNKWN;
1153 /* Check if physical ha port is Up */ 1179 /* Check if physical ha port is Up */
1154 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 1180 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1155 atomic_read(&ha->loop_state) == LOOP_DEAD) { 1181 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1156 /* Don't retry or attempt login of this virtual port */ 1182 /* Don't retry or attempt login of this virtual port */
1157 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1183 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1158 vha->host_no)); 1184 base_vha->host_no));
1159 atomic_set(&vha->loop_state, LOOP_DEAD); 1185 atomic_set(&vha->loop_state, LOOP_DEAD);
1160 if (!disable) 1186 if (!disable)
1161 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1187 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1171,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1171 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1197 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1172 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1198 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1173 fc_host_supported_classes(vha->host) = 1199 fc_host_supported_classes(vha->host) =
1174 fc_host_supported_classes(ha->host); 1200 fc_host_supported_classes(base_vha->host);
1175 fc_host_supported_speeds(vha->host) = 1201 fc_host_supported_speeds(vha->host) =
1176 fc_host_supported_speeds(ha->host); 1202 fc_host_supported_speeds(base_vha->host);
1177 1203
1178 qla24xx_vport_disable(fc_vport, disable); 1204 qla24xx_vport_disable(fc_vport, disable);
1179 1205
1206 /* Create a queue pair for the vport */
1207 if (ha->mqenable) {
1208 if (ha->npiv_info) {
1209 for (; cnt < ha->nvram_npiv_size; cnt++) {
1210 if (ha->npiv_info[cnt].port_name ==
1211 vha->port_name &&
1212 ha->npiv_info[cnt].node_name ==
1213 vha->node_name) {
1214 qos = ha->npiv_info[cnt].q_qos;
1215 break;
1216 }
1217 }
1218 }
1219 qla25xx_create_queues(vha, qos);
1220 }
1221
1180 return 0; 1222 return 0;
1181vport_create_failed_2: 1223vport_create_failed_2:
1182 qla24xx_disable_vp(vha); 1224 qla24xx_disable_vp(vha);
1183 qla24xx_deallocate_vp_id(vha); 1225 qla24xx_deallocate_vp_id(vha);
1184 kfree(vha->port_name);
1185 kfree(vha->node_name);
1186 scsi_host_put(vha->host); 1226 scsi_host_put(vha->host);
1187 return FC_VPORT_FAILED; 1227 return FC_VPORT_FAILED;
1188} 1228}
@@ -1191,17 +1231,34 @@ static int
1191qla24xx_vport_delete(struct fc_vport *fc_vport) 1231qla24xx_vport_delete(struct fc_vport *fc_vport)
1192{ 1232{
1193 scsi_qla_host_t *vha = fc_vport->dd_data; 1233 scsi_qla_host_t *vha = fc_vport->dd_data;
1194 scsi_qla_host_t *pha = to_qla_parent(vha); 1234 fc_port_t *fcport, *tfcport;
1235 struct qla_hw_data *ha = vha->hw;
1236 uint16_t id = vha->vp_idx;
1195 1237
1196 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || 1238 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1197 test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags)) 1239 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1198 msleep(1000); 1240 msleep(1000);
1199 1241
1242 if (ha->mqenable) {
1243 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
1244 qla_printk(KERN_WARNING, ha,
1245 "Queue delete failed.\n");
1246 vha->req_ques[0] = ha->req_q_map[0]->id;
1247 }
1248
1200 qla24xx_disable_vp(vha); 1249 qla24xx_disable_vp(vha);
1201 qla24xx_deallocate_vp_id(vha);
1202 1250
1203 kfree(vha->node_name); 1251 fc_remove_host(vha->host);
1204 kfree(vha->port_name); 1252
1253 scsi_remove_host(vha->host);
1254
1255 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1256 list_del(&fcport->list);
1257 kfree(fcport);
1258 fcport = NULL;
1259 }
1260
1261 qla24xx_deallocate_vp_id(vha);
1205 1262
1206 if (vha->timer_active) { 1263 if (vha->timer_active) {
1207 qla2x00_vp_stop_timer(vha); 1264 qla2x00_vp_stop_timer(vha);
@@ -1210,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1210 vha->host_no, vha->vp_idx, vha)); 1267 vha->host_no, vha->vp_idx, vha));
1211 } 1268 }
1212 1269
1213 fc_remove_host(vha->host);
1214
1215 scsi_remove_host(vha->host);
1216
1217 scsi_host_put(vha->host); 1270 scsi_host_put(vha->host);
1218 1271 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1219 return 0; 1272 return 0;
1220} 1273}
1221 1274
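
Annotation: the reworked delete path also demonstrates the canonical way to destroy a list while traversing it: list_for_each_entry_safe() keeps a second cursor on the successor, so the current node can be unlinked and freed mid-walk. Generic form:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct node {
		struct list_head list;
		/* payload ... */
	};

	static void destroy_all(struct list_head *head)
	{
		struct node *n, *tmp;

		list_for_each_entry_safe(n, tmp, head, list) {
			list_del(&n->list);
			kfree(n);	/* safe: 'tmp' already points past n */
		}
	}

The reordering in the hunk matters as well: fc_remove_host() and scsi_remove_host() now run before the fcport list is torn down and the vp id released, so no transport object can still reference a freed port.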
@@ -1318,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1318}; 1371};
1319 1372
1320void 1373void
1321qla2x00_init_host_attr(scsi_qla_host_t *ha) 1374qla2x00_init_host_attr(scsi_qla_host_t *vha)
1322{ 1375{
1376 struct qla_hw_data *ha = vha->hw;
1323 u32 speed = FC_PORTSPEED_UNKNOWN; 1377 u32 speed = FC_PORTSPEED_UNKNOWN;
1324 1378
1325 fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); 1379 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1326 fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); 1380 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1327 fc_host_supported_classes(ha->host) = FC_COS_CLASS3; 1381 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
1328 fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;; 1382 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1329 fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count; 1383 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1330 1384
1331 if (IS_QLA25XX(ha)) 1385 if (IS_QLA25XX(ha))
1332 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1386 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -1338,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
1338 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1392 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1339 else 1393 else
1340 speed = FC_PORTSPEED_1GBIT; 1394 speed = FC_PORTSPEED_1GBIT;
1341 fc_host_supported_speeds(ha->host) = speed; 1395 fc_host_supported_speeds(vha->host) = speed;
1342} 1396}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 510ba64bc286..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -9,7 +9,7 @@
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static inline void 11static inline void
12qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump) 12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 13{
14 fw_dump->fw_major_version = htonl(ha->fw_major_version); 14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version); 15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
23} 23}
24 24
25static inline void * 25static inline void *
26qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr) 26qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
27{ 27{
28 struct req_que *req = ha->req_q_map[0];
29 struct rsp_que *rsp = ha->rsp_q_map[0];
28 /* Request queue. */ 30 /* Request queue. */
29 memcpy(ptr, ha->request_ring, ha->request_q_length * 31 memcpy(ptr, req->ring, req->length *
30 sizeof(request_t)); 32 sizeof(request_t));
31 33
32 /* Response queue. */ 34 /* Response queue. */
33 ptr += ha->request_q_length * sizeof(request_t); 35 ptr += req->length * sizeof(request_t);
34 memcpy(ptr, ha->response_ring, ha->response_q_length * 36 memcpy(ptr, rsp->ring, rsp->length *
35 sizeof(response_t)); 37 sizeof(response_t));
36 38
37 return ptr + (ha->response_q_length * sizeof(response_t)); 39 return ptr + (rsp->length * sizeof(response_t));
38} 40}
39 41
40static int 42static int
41qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram, 43qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
42 uint32_t ram_dwords, void **nxt) 44 uint32_t ram_dwords, void **nxt)
43{ 45{
44 int rval; 46 int rval;
@@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
112} 114}
113 115
114static int 116static int
115qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram, 117qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
116 uint32_t cram_size, void **nxt) 118 uint32_t cram_size, void **nxt)
117{ 119{
118 int rval; 120 int rval;
@@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
163} 165}
164 166
165static int 167static int
166qla24xx_soft_reset(scsi_qla_host_t *ha) 168qla24xx_soft_reset(struct qla_hw_data *ha)
167{ 169{
168 int rval = QLA_SUCCESS; 170 int rval = QLA_SUCCESS;
169 uint32_t cnt; 171 uint32_t cnt;
@@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
215} 217}
216 218
217static int 219static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 220qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
219 uint32_t ram_words, void **nxt) 221 uint16_t ram_words, void **nxt)
220{ 222{
221 int rval; 223 int rval;
222 uint32_t cnt, stat, timer, words, idx; 224 uint32_t cnt, stat, timer, words, idx;
@@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
314 * @hardware_locked: Called with the hardware_lock 316 * @hardware_locked: Called with the hardware_lock
315 */ 317 */
316void 318void
317qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 319qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
318{ 320{
319 int rval; 321 int rval;
320 uint32_t cnt; 322 uint32_t cnt;
321 323 struct qla_hw_data *ha = vha->hw;
322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 324 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323 uint16_t __iomem *dmp_reg; 325 uint16_t __iomem *dmp_reg;
324 unsigned long flags; 326 unsigned long flags;
325 struct qla2300_fw_dump *fw; 327 struct qla2300_fw_dump *fw;
326 void *nxt; 328 void *nxt;
329 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
327 330
328 flags = 0; 331 flags = 0;
329 332
@@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
468 } else { 471 } else {
469 qla_printk(KERN_INFO, ha, 472 qla_printk(KERN_INFO, ha,
470 "Firmware dump saved to temp buffer (%ld/%p).\n", 473 "Firmware dump saved to temp buffer (%ld/%p).\n",
471 ha->host_no, ha->fw_dump); 474 base_vha->host_no, ha->fw_dump);
472 ha->fw_dumped = 1; 475 ha->fw_dumped = 1;
473 } 476 }
474 477
@@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
483 * @hardware_locked: Called with the hardware_lock 486 * @hardware_locked: Called with the hardware_lock
484 */ 487 */
485void 488void
486qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 489qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
487{ 490{
488 int rval; 491 int rval;
489 uint32_t cnt, timer; 492 uint32_t cnt, timer;
490 uint16_t risc_address; 493 uint16_t risc_address;
491 uint16_t mb0, mb2; 494 uint16_t mb0, mb2;
495 struct qla_hw_data *ha = vha->hw;
492 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 496 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
493 uint16_t __iomem *dmp_reg; 497 uint16_t __iomem *dmp_reg;
494 unsigned long flags; 498 unsigned long flags;
495 struct qla2100_fw_dump *fw; 499 struct qla2100_fw_dump *fw;
500 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
496 501
497 risc_address = 0; 502 risc_address = 0;
498 mb0 = mb2 = 0; 503 mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
673 } else { 678 } else {
674 qla_printk(KERN_INFO, ha, 679 qla_printk(KERN_INFO, ha,
675 "Firmware dump saved to temp buffer (%ld/%p).\n", 680 "Firmware dump saved to temp buffer (%ld/%p).\n",
676 ha->host_no, ha->fw_dump); 681 base_vha->host_no, ha->fw_dump);
677 ha->fw_dumped = 1; 682 ha->fw_dumped = 1;
678 } 683 }
679 684
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
683} 688}
684 689
685void 690void
686qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 691qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
687{ 692{
688 int rval; 693 int rval;
689 uint32_t cnt; 694 uint32_t cnt;
690 uint32_t risc_address; 695 uint32_t risc_address;
691 696 struct qla_hw_data *ha = vha->hw;
692 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 697 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
693 uint32_t __iomem *dmp_reg; 698 uint32_t __iomem *dmp_reg;
694 uint32_t *iter_reg; 699 uint32_t *iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
697 struct qla24xx_fw_dump *fw; 702 struct qla24xx_fw_dump *fw;
698 uint32_t ext_mem_cnt; 703 uint32_t ext_mem_cnt;
699 void *nxt; 704 void *nxt;
705 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
700 706
701 risc_address = ext_mem_cnt = 0; 707 risc_address = ext_mem_cnt = 0;
702 flags = 0; 708 flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
919 } else { 925 } else {
920 qla_printk(KERN_INFO, ha, 926 qla_printk(KERN_INFO, ha,
921 "Firmware dump saved to temp buffer (%ld/%p).\n", 927 "Firmware dump saved to temp buffer (%ld/%p).\n",
922 ha->host_no, ha->fw_dump); 928 base_vha->host_no, ha->fw_dump);
923 ha->fw_dumped = 1; 929 ha->fw_dumped = 1;
924 } 930 }
925 931
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
929} 935}
930 936
931void 937void
932qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 938qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
933{ 939{
934 int rval; 940 int rval;
935 uint32_t cnt; 941 uint32_t cnt;
936 uint32_t risc_address; 942 uint32_t risc_address;
937 943 struct qla_hw_data *ha = vha->hw;
938 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 944 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
945 struct device_reg_25xxmq __iomem *reg25;
939 uint32_t __iomem *dmp_reg; 946 uint32_t __iomem *dmp_reg;
940 uint32_t *iter_reg; 947 uint32_t *iter_reg;
941 uint16_t __iomem *mbx_reg; 948 uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
944 uint32_t ext_mem_cnt; 951 uint32_t ext_mem_cnt;
945 void *nxt; 952 void *nxt;
946 struct qla2xxx_fce_chain *fcec; 953 struct qla2xxx_fce_chain *fcec;
954 struct qla2xxx_mq_chain *mq = NULL;
955 uint32_t qreg_size;
956 uint8_t req_cnt, rsp_cnt, que_cnt;
957 uint32_t que_idx;
958 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
947 959
948 risc_address = ext_mem_cnt = 0; 960 risc_address = ext_mem_cnt = 0;
949 flags = 0; 961 flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
988 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1000 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
989 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1001 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
990 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); 1002 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1003
1004 /* Multi queue registers */
1005 if (ha->mqenable) {
1006 qreg_size = sizeof(struct qla2xxx_mq_chain);
1007 mq = kzalloc(qreg_size, GFP_KERNEL);
1008 if (!mq)
1009 goto qla25xx_fw_dump_failed_0;
1010 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
1011 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
1012 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
1013 mq->count = htonl(que_cnt);
1014 mq->chain_size = htonl(qreg_size);
1015 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
1016 for (cnt = 0; cnt < que_cnt; cnt++) {
1017 reg25 = (struct device_reg_25xxmq *) ((void *)
1018 ha->mqiobase + cnt * QLA_QUE_PAGE);
1019 que_idx = cnt * 4;
1020 mq->qregs[que_idx] = htonl(reg25->req_q_in);
1021 mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
1022 mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
1023 mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
1024 }
1025 }
991 WRT_REG_DWORD(&reg->iobase_window, 0x00); 1026 WRT_REG_DWORD(&reg->iobase_window, 0x00);
992 RD_REG_DWORD(&reg->iobase_window); 1027 RD_REG_DWORD(&reg->iobase_window);
993 1028
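
Annotation: the new multiqueue block snapshots each queue pair's in/out pointers into a self-describing chain record; count, chain_size and type are stored big-endian (htonl) so an offline tool can parse or skip the chain regardless of host byte order. The queue count is derived from the qid bitmaps on the assumption that ids are handed out densely from bit 0, so the first clear bit equals the number in use. That derivation, isolated:

	#include <linux/bitops.h>
	#include <linux/kernel.h>

	/* assumes qids are allocated densely starting at bit 0 */
	static u8 active_queue_count(unsigned long *req_map,
	    unsigned long *rsp_map, unsigned int nbits)
	{
		unsigned int req = find_first_zero_bit(req_map, nbits);
		unsigned int rsp = find_first_zero_bit(rsp_map, nbits);

		return max(req, rsp);	/* cover whichever side has more */
	}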
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1225 1260
1226 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1261 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1227 1262
1228 fcec = nxt + ntohl(ha->fw_dump->eft_size); 1263 if (ha->mqenable) {
1264 nxt = nxt + ntohl(ha->fw_dump->eft_size);
1265 memcpy(nxt, mq, qreg_size);
1266 kfree(mq);
1267 fcec = nxt + qreg_size;
1268 } else {
1269 fcec = nxt + ntohl(ha->fw_dump->eft_size);
1270 }
1229 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST); 1271 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
1230 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + 1272 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
1231 fce_calc_size(ha->fce_bufs)); 1273 fce_calc_size(ha->fce_bufs));
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
1248 } else { 1290 } else {
1249 qla_printk(KERN_INFO, ha, 1291 qla_printk(KERN_INFO, ha,
1250 "Firmware dump saved to temp buffer (%ld/%p).\n", 1292 "Firmware dump saved to temp buffer (%ld/%p).\n",
1251 ha->host_no, ha->fw_dump); 1293 base_vha->host_no, ha->fw_dump);
1252 ha->fw_dumped = 1; 1294 ha->fw_dumped = 1;
1253 } 1295 }
1254 1296
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
1256 if (!hardware_locked) 1298 if (!hardware_locked)
1257 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1258} 1300}
1259
1260/****************************************************************************/ 1301/****************************************************************************/
1261/* Driver Debug Functions. */ 1302/* Driver Debug Functions. */
1262/****************************************************************************/ 1303/****************************************************************************/
1263 1304
1264void 1305void
1265qla2x00_dump_regs(scsi_qla_host_t *ha) 1306qla2x00_dump_regs(scsi_qla_host_t *vha)
1266{ 1307{
1267 int i; 1308 int i;
1309 struct qla_hw_data *ha = vha->hw;
1268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1269 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1311 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1270 uint16_t __iomem *mbx_reg; 1312 uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
1274 1316
1275 printk("Mailbox registers:\n"); 1317 printk("Mailbox registers:\n");
1276 for (i = 0; i < 6; i++) 1318 for (i = 0; i < 6; i++)
1277 printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i, 1319 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1278 RD_REG_WORD(mbx_reg++)); 1320 RD_REG_WORD(mbx_reg++));
1279} 1321}
1280 1322
@@ -1302,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1302 if (cnt % 16) 1344 if (cnt % 16)
1303 printk("\n"); 1345 printk("\n");
1304} 1346}
1347
1348
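
The qla25xx_fw_dump() hunk above sizes the new multi-queue chain by probing the request/response qid bitmaps and captures four in/out pointers per queue before splicing the chain in after the EFT data. A minimal standalone sketch of that sizing arithmetic, with hypothetical bitmap values and the kernel's find_first_zero_bit() reduced to a toy helper:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htonl(), matching the big-endian dump */

#define QLA_MQ_SIZE 32

/* toy stand-in for the kernel's find_first_zero_bit() */
static unsigned int first_zero_bit(unsigned long map, unsigned int max)
{
	unsigned int i;

	for (i = 0; i < max; i++)
		if (!(map & (1UL << i)))
			return i;
	return max;
}

int main(void)
{
	unsigned long req_qid_map = 0x07;	/* hypothetical: queues 0-2 in use */
	unsigned long rsp_qid_map = 0x03;	/* hypothetical: queues 0-1 in use */
	unsigned int req_cnt = first_zero_bit(req_qid_map, QLA_MQ_SIZE);
	unsigned int rsp_cnt = first_zero_bit(rsp_qid_map, QLA_MQ_SIZE);
	unsigned int que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;

	/* four 32-bit registers per queue land at mq->qregs[cnt * 4 + n] */
	printf("que_cnt=%u, qregs used=%u, count on the wire=0x%08x\n",
	    que_cnt, que_cnt * 4, htonl((uint32_t)que_cnt));
	return 0;
}
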
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8#include "qla_def.h"
9
7/* 10/*
8 * Driver debug definitions. 11 * Driver debug definitions.
9 */ 12 */
@@ -23,6 +26,7 @@
23/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
24/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
25/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
26 30
27/* 31/*
28* Macros used for debugging the driver. 32* Macros used for debugging the driver.
@@ -43,6 +47,7 @@
43#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) 47#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
44#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) 48#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) 49#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46 51
47#if defined(QL_DEBUG_LEVEL_3) 52#if defined(QL_DEBUG_LEVEL_3)
48#define DEBUG3(x) do {x;} while (0) 53#define DEBUG3(x) do {x;} while (0)
@@ -127,7 +132,6 @@
127#else 132#else
128#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
129#endif 134#endif
130
131/* 135/*
132 * Firmware Dump structure definition 136 * Firmware Dump structure definition
133 */ 137 */
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain {
266 uint32_t eregs[8]; 270 uint32_t eregs[8];
267}; 271};
268 272
273struct qla2xxx_mq_chain {
274 uint32_t type;
275 uint32_t chain_size;
276
277 uint32_t count;
278 uint32_t qregs[4 * QLA_MQ_SIZE];
279};
280
269#define DUMP_CHAIN_VARIANT 0x80000000 281#define DUMP_CHAIN_VARIANT 0x80000000
270#define DUMP_CHAIN_FCE 0x7FFFFAF0 282#define DUMP_CHAIN_FCE 0x7FFFFAF0
283#define DUMP_CHAIN_MQ 0x7FFFFAF1
271#define DUMP_CHAIN_LAST 0x80000000 284#define DUMP_CHAIN_LAST 0x80000000
272 285
273struct qla2xxx_fw_dump { 286struct qla2xxx_fw_dump {
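
The chain constants above imply a simple self-describing layout: each chain opens with a big-endian type word (DUMP_CHAIN_MQ, DUMP_CHAIN_FCE) and a byte count, and the final chain carries DUMP_CHAIN_LAST ORed into its type. Below is a sketch of how a post-processing tool might walk that list; the header shape is inferred from the structs above, not taken from any existing qla2xxx utility:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DUMP_CHAIN_FCE	0x7FFFFAF0
#define DUMP_CHAIN_MQ	0x7FFFFAF1
#define DUMP_CHAIN_LAST	0x80000000

struct chain_hdr {		/* assumed common head of the *_chain structs */
	uint32_t type;		/* big-endian, LAST bit marks the tail */
	uint32_t chain_size;	/* big-endian, bytes including this header */
};

static void walk_chains(const uint8_t *p)
{
	for (;;) {
		const struct chain_hdr *h = (const void *)p;
		uint32_t type = ntohl(h->type);

		switch (type & ~DUMP_CHAIN_LAST) {
		case DUMP_CHAIN_MQ:
			printf("MQ chain, %u bytes\n", ntohl(h->chain_size));
			break;
		case DUMP_CHAIN_FCE:
			printf("FCE chain, %u bytes\n", ntohl(h->chain_size));
			break;
		default:
			printf("unknown chain 0x%08x, stopping\n", type);
			return;
		}
		if (type & DUMP_CHAIN_LAST)
			return;
		p += ntohl(h->chain_size);
	}
}

int main(void)
{
	/* one MQ chain followed by a terminating FCE chain */
	struct chain_hdr buf[2] = {
		{ htonl(DUMP_CHAIN_MQ), htonl(sizeof(buf[0])) },
		{ htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST),
		  htonl(sizeof(buf[1])) },
	};

	walk_chains((const uint8_t *)buf);
	return 0;
}
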
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b97194096d8e..a29c95204975 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -181,11 +181,14 @@
181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183 183
184struct req_que;
185
184/* 186/*
185 * SCSI Request Block 187 * SCSI Request Block
186 */ 188 */
187typedef struct srb { 189typedef struct srb {
188 struct scsi_qla_host *ha; /* HA the SP is queued on */ 190 struct scsi_qla_host *vha; /* HA the SP is queued on */
191 struct req_que *que;
189 struct fc_port *fcport; 192 struct fc_port *fcport;
190 193
191 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 194 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -369,9 +372,17 @@ struct device_reg_2xxx {
369 } u_end; 372 } u_end;
370}; 373};
371 374
375struct device_reg_25xxmq {
376 volatile uint32_t req_q_in;
377 volatile uint32_t req_q_out;
378 volatile uint32_t rsp_q_in;
379 volatile uint32_t rsp_q_out;
380};
381
372typedef union { 382typedef union {
373 struct device_reg_2xxx isp; 383 struct device_reg_2xxx isp;
374 struct device_reg_24xx isp24; 384 struct device_reg_24xx isp24;
385 struct device_reg_25xxmq isp25mq;
375} device_reg_t; 386} device_reg_t;
376 387
377#define ISP_REQ_Q_IN(ha, reg) \ 388#define ISP_REQ_Q_IN(ha, reg) \
@@ -1524,7 +1535,7 @@ typedef struct {
1524 */ 1535 */
1525typedef struct fc_port { 1536typedef struct fc_port {
1526 struct list_head list; 1537 struct list_head list;
1527 struct scsi_qla_host *ha; 1538 struct scsi_qla_host *vha;
1528 1539
1529 uint8_t node_name[WWN_SIZE]; 1540 uint8_t node_name[WWN_SIZE];
1530 uint8_t port_name[WWN_SIZE]; 1541 uint8_t port_name[WWN_SIZE];
@@ -1550,7 +1561,6 @@ typedef struct fc_port {
1550 unsigned long last_queue_full; 1561 unsigned long last_queue_full;
1551 unsigned long last_ramp_up; 1562 unsigned long last_ramp_up;
1552 1563
1553 struct list_head vp_fcport;
1554 uint16_t vp_idx; 1564 uint16_t vp_idx;
1555} fc_port_t; 1565} fc_port_t;
1556 1566
@@ -2037,6 +2047,8 @@ typedef struct vport_params {
2037#define VP_RET_CODE_NO_MEM 5 2047#define VP_RET_CODE_NO_MEM 5
2038#define VP_RET_CODE_NOT_FOUND 6 2048#define VP_RET_CODE_NOT_FOUND 6
2039 2049
2050struct qla_hw_data;
2051
2040/* 2052/*
2041 * ISP operations 2053 * ISP operations
2042 */ 2054 */
@@ -2055,10 +2067,11 @@ struct isp_operations {
2055 char * (*fw_version_str) (struct scsi_qla_host *, char *); 2067 char * (*fw_version_str) (struct scsi_qla_host *, char *);
2056 2068
2057 irq_handler_t intr_handler; 2069 irq_handler_t intr_handler;
2058 void (*enable_intrs) (struct scsi_qla_host *); 2070 void (*enable_intrs) (struct qla_hw_data *);
2059 void (*disable_intrs) (struct scsi_qla_host *); 2071 void (*disable_intrs) (struct qla_hw_data *);
2060 2072
2061 int (*abort_command) (struct scsi_qla_host *, srb_t *); 2073 int (*abort_command) (struct scsi_qla_host *, srb_t *,
2074 struct req_que *);
2062 int (*target_reset) (struct fc_port *, unsigned int); 2075 int (*target_reset) (struct fc_port *, unsigned int);
2063 int (*lun_reset) (struct fc_port *, unsigned int); 2076 int (*lun_reset) (struct fc_port *, unsigned int);
2064 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2077 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2089,6 +2102,10 @@ struct isp_operations {
2089 uint32_t); 2102 uint32_t);
2090 2103
2091 int (*get_flash_version) (struct scsi_qla_host *, void *); 2104 int (*get_flash_version) (struct scsi_qla_host *, void *);
2105 int (*start_scsi) (srb_t *);
2106 void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2107 void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2108 uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
2092}; 2109};
2093 2110
2094/* MSI-X Support *************************************************************/ 2111/* MSI-X Support *************************************************************/
@@ -2100,16 +2117,18 @@ struct isp_operations {
2100#define QLA_MSIX_DEFAULT 0x00 2117#define QLA_MSIX_DEFAULT 0x00
2101#define QLA_MSIX_RSP_Q 0x01 2118#define QLA_MSIX_RSP_Q 0x01
2102 2119
2103#define QLA_MSIX_ENTRIES 2
2104#define QLA_MIDX_DEFAULT 0 2120#define QLA_MIDX_DEFAULT 0
2105#define QLA_MIDX_RSP_Q 1 2121#define QLA_MIDX_RSP_Q 1
2122#define QLA_PCI_MSIX_CONTROL 0xa2
2106 2123
2107struct scsi_qla_host; 2124struct scsi_qla_host;
2125struct rsp_que;
2108 2126
2109struct qla_msix_entry { 2127struct qla_msix_entry {
2110 int have_irq; 2128 int have_irq;
2111 uint32_t msix_vector; 2129 uint32_t vector;
2112 uint16_t msix_entry; 2130 uint16_t entry;
2131 struct rsp_que *rsp;
2113}; 2132};
2114 2133
2115#define WATCH_INTERVAL 1 /* number of seconds */ 2134#define WATCH_INTERVAL 1 /* number of seconds */
@@ -2160,208 +2179,137 @@ struct qla_statistics {
2160 uint64_t output_bytes; 2179 uint64_t output_bytes;
2161}; 2180};
2162 2181
2163/* 2182/* Multi queue support */
2164 * Linux Host Adapter structure 2183#define MBC_INITIALIZE_MULTIQ 0x1f
2165 */ 2184#define QLA_QUE_PAGE 0X1000
2166typedef struct scsi_qla_host { 2185#define QLA_MQ_SIZE 32
2167 struct list_head list; 2186#define QLA_MAX_HOST_QUES 16
2187#define QLA_MAX_QUEUES 256
2188#define ISP_QUE_REG(ha, id) \
2189 ((ha->mqenable) ? \
2190 ((void *)(ha->mqiobase) +\
2191 (QLA_QUE_PAGE * id)) :\
2192 ((void *)(ha->iobase)))
2193#define QLA_REQ_QUE_ID(tag) \
2194 ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
2195#define QLA_DEFAULT_QUE_QOS 5
2196#define QLA_PRECONFIG_VPORTS 32
2197#define QLA_MAX_VPORTS_QLA24XX 128
2198#define QLA_MAX_VPORTS_QLA25XX 256
2199/* Response queue data structure */
2200struct rsp_que {
2201 dma_addr_t dma;
2202 response_t *ring;
2203 response_t *ring_ptr;
2204 uint16_t ring_index;
2205 uint16_t out_ptr;
2206 uint16_t length;
2207 uint16_t options;
2208 uint16_t rid;
2209 uint16_t id;
2210 uint16_t vp_idx;
2211 struct qla_hw_data *hw;
2212 struct qla_msix_entry *msix;
2213 struct req_que *req;
2214};
2168 2215
2169 /* Commonly used flags and state information. */ 2216/* Request queue data structure */
2170 struct Scsi_Host *host; 2217struct req_que {
2171 struct pci_dev *pdev; 2218 dma_addr_t dma;
2219 request_t *ring;
2220 request_t *ring_ptr;
2221 uint16_t ring_index;
2222 uint16_t in_ptr;
2223 uint16_t cnt;
2224 uint16_t length;
2225 uint16_t options;
2226 uint16_t rid;
2227 uint16_t id;
2228 uint16_t qos;
2229 uint16_t vp_idx;
2230 struct rsp_que *rsp;
2231 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
2232 uint32_t current_outstanding_cmd;
2233 int max_q_depth;
2234};
2172 2235
2173 unsigned long host_no; 2236/*
2237 * Qlogic host adapter specific data structure.
2238*/
2239struct qla_hw_data {
2240 struct pci_dev *pdev;
2241 /* SRB cache. */
2242#define SRB_MIN_REQ 128
2243 mempool_t *srb_mempool;
2174 2244
2175 volatile struct { 2245 volatile struct {
2176 uint32_t init_done :1;
2177 uint32_t online :1;
2178 uint32_t mbox_int :1; 2246 uint32_t mbox_int :1;
2179 uint32_t mbox_busy :1; 2247 uint32_t mbox_busy :1;
2180 uint32_t rscn_queue_overflow :1;
2181 uint32_t reset_active :1;
2182
2183 uint32_t management_server_logged_in :1;
2184 uint32_t process_response_queue :1;
2185 2248
2186 uint32_t disable_risc_code_load :1; 2249 uint32_t disable_risc_code_load :1;
2187 uint32_t enable_64bit_addressing :1; 2250 uint32_t enable_64bit_addressing :1;
2188 uint32_t enable_lip_reset :1; 2251 uint32_t enable_lip_reset :1;
2189 uint32_t enable_lip_full_login :1;
2190 uint32_t enable_target_reset :1; 2252 uint32_t enable_target_reset :1;
2253 uint32_t enable_lip_full_login :1;
2191 uint32_t enable_led_scheme :1; 2254 uint32_t enable_led_scheme :1;
2192 uint32_t inta_enabled :1; 2255 uint32_t inta_enabled :1;
2193 uint32_t msi_enabled :1; 2256 uint32_t msi_enabled :1;
2194 uint32_t msix_enabled :1; 2257 uint32_t msix_enabled :1;
2195 uint32_t disable_serdes :1; 2258 uint32_t disable_serdes :1;
2196 uint32_t gpsc_supported :1; 2259 uint32_t gpsc_supported :1;
2197 uint32_t vsan_enabled :1; 2260 uint32_t vsan_enabled :1;
2198 uint32_t npiv_supported :1; 2261 uint32_t npiv_supported :1;
2199 uint32_t fce_enabled :1; 2262 uint32_t fce_enabled :1;
2200 uint32_t hw_event_marker_found :1; 2263 uint32_t hw_event_marker_found:1;
2201 } flags; 2264 } flags;
2202 2265
2203 atomic_t loop_state;
2204#define LOOP_TIMEOUT 1
2205#define LOOP_DOWN 2
2206#define LOOP_UP 3
2207#define LOOP_UPDATE 4
2208#define LOOP_READY 5
2209#define LOOP_DEAD 6
2210
2211 unsigned long dpc_flags;
2212#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2213#define RESET_ACTIVE 1
2214#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2215#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2216#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2217#define LOOP_RESYNC_ACTIVE 5
2218#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2219#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2220#define MAILBOX_RETRY 8
2221#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2222#define FAILOVER_EVENT_NEEDED 10
2223#define FAILOVER_EVENT 11
2224#define FAILOVER_NEEDED 12
2225#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2226#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2227#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2228#define ABORT_QUEUES_NEEDED 16
2229#define RELOGIN_NEEDED 17
2230#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2231#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2232#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2233#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2234#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2235#define IOCTL_ERROR_RECOVERY 23
2236#define LOOP_RESET_NEEDED 24
2237#define BEACON_BLINK_NEEDED 25
2238#define REGISTER_FDMI_NEEDED 26
2239#define FCPORT_UPDATE_NEEDED 27
2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2243
2244 uint32_t device_flags;
2245#define DFLG_LOCAL_DEVICES BIT_0
2246#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2247#define DFLG_FABRIC_DEVICES BIT_2
2248#define SWITCH_FOUND BIT_3
2249#define DFLG_NO_CABLE BIT_4
2250
2251#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2252#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2253 uint32_t device_type;
2254#define DT_ISP2100 BIT_0
2255#define DT_ISP2200 BIT_1
2256#define DT_ISP2300 BIT_2
2257#define DT_ISP2312 BIT_3
2258#define DT_ISP2322 BIT_4
2259#define DT_ISP6312 BIT_5
2260#define DT_ISP6322 BIT_6
2261#define DT_ISP2422 BIT_7
2262#define DT_ISP2432 BIT_8
2263#define DT_ISP5422 BIT_9
2264#define DT_ISP5432 BIT_10
2265#define DT_ISP2532 BIT_11
2266#define DT_ISP8432 BIT_12
2267#define DT_ISP_LAST (DT_ISP8432 << 1)
2268
2269#define DT_IIDMA BIT_26
2270#define DT_FWI2 BIT_27
2271#define DT_ZIO_SUPPORTED BIT_28
2272#define DT_OEM_001 BIT_29
2273#define DT_ISP2200A BIT_30
2274#define DT_EXTENDED_IDS BIT_31
2275
2276#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2277#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2278#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2279#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2280#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2281#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2282#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2283#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2284#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2285#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2286#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2287#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2288#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2289#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2290
2291#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2292 IS_QLA6312(ha) || IS_QLA6322(ha))
2293#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2294#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2295#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2296#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2297#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2298 IS_QLA84XX(ha))
2299
2300#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2301#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2302#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2303#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2304#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2305
2306 /* SRB cache. */
2307#define SRB_MIN_REQ 128
2308 mempool_t *srb_mempool;
2309
2310 /* This spinlock is used to protect "io transactions", you must 2266 /* This spinlock is used to protect "io transactions", you must
2311 * acquire it before doing any IO to the card, eg with RD_REG*() and 2267 * acquire it before doing any IO to the card, eg with RD_REG*() and
2312 * WRT_REG*() for the duration of your entire command transaction. 2268 * WRT_REG*() for the duration of your entire command transaction.
2313 * 2269 *
2314 * This spinlock is of lower priority than the io request lock. 2270 * This spinlock is of lower priority than the io request lock.
2315 */ 2271 */
2316
2317 spinlock_t hardware_lock ____cacheline_aligned;
2318 2272
2273 spinlock_t hardware_lock ____cacheline_aligned;
2319 int bars; 2274 int bars;
2320 int mem_only; 2275 int mem_only;
2321 device_reg_t __iomem *iobase; /* Base I/O address */ 2276 device_reg_t __iomem *iobase; /* Base I/O address */
2322 resource_size_t pio_address; 2277 resource_size_t pio_address;
2323#define MIN_IOBASE_LEN 0x100
2324
2325 /* ISP ring lock, rings, and indexes */
2326 dma_addr_t request_dma; /* Physical address. */
2327 request_t *request_ring; /* Base virtual address */
2328 request_t *request_ring_ptr; /* Current address. */
2329 uint16_t req_ring_index; /* Current index. */
2330 uint16_t req_q_cnt; /* Number of available entries. */
2331 uint16_t request_q_length;
2332
2333 dma_addr_t response_dma; /* Physical address. */
2334 response_t *response_ring; /* Base virtual address */
2335 response_t *response_ring_ptr; /* Current address. */
2336 uint16_t rsp_ring_index; /* Current index. */
2337 uint16_t response_q_length;
2338
2339 struct isp_operations *isp_ops;
2340 2278
2341 /* Outstandings ISP commands. */ 2279#define MIN_IOBASE_LEN 0x100
2342 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; 2280/* Multi queue data structs */
2343 uint32_t current_outstanding_cmd; 2281 device_reg_t *mqiobase;
2344 srb_t *status_srb; /* Status continuation entry. */ 2282 uint16_t msix_count;
2283 uint8_t mqenable;
2284 struct req_que **req_q_map;
2285 struct rsp_que **rsp_q_map;
2286 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2287 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2288 uint16_t max_queues;
2289 struct qla_npiv_entry *npiv_info;
2290 uint16_t nvram_npiv_size;
2291
2292 uint16_t switch_cap;
2293#define FLOGI_SEQ_DEL BIT_8
2294#define FLOGI_MID_SUPPORT BIT_10
2295#define FLOGI_VSAN_SUPPORT BIT_12
2296#define FLOGI_SP_SUPPORT BIT_13
2297 /* Timeout timers. */
2298 uint8_t loop_down_abort_time; /* port down timer */
2299 atomic_t loop_down_timer; /* loop down timer */
2300 uint8_t link_down_timeout; /* link down timeout */
2301 uint16_t max_loop_id;
2345 2302
2346 /* ISP configuration data. */
2347 uint16_t loop_id; /* Host adapter loop id */
2348 uint16_t switch_cap;
2349#define FLOGI_SEQ_DEL BIT_8
2350#define FLOGI_MID_SUPPORT BIT_10
2351#define FLOGI_VSAN_SUPPORT BIT_12
2352#define FLOGI_SP_SUPPORT BIT_13
2353 uint16_t fb_rev; 2303 uint16_t fb_rev;
2354
2355 port_id_t d_id; /* Host adapter port id */
2356 uint16_t max_public_loop_ids; 2304 uint16_t max_public_loop_ids;
2357 uint16_t min_external_loopid; /* First external loop Id */ 2305 uint16_t min_external_loopid; /* First external loop Id */
2358 2306
2359#define PORT_SPEED_UNKNOWN 0xFFFF 2307#define PORT_SPEED_UNKNOWN 0xFFFF
2360#define PORT_SPEED_1GB 0x00 2308#define PORT_SPEED_1GB 0x00
2361#define PORT_SPEED_2GB 0x01 2309#define PORT_SPEED_2GB 0x01
2362#define PORT_SPEED_4GB 0x03 2310#define PORT_SPEED_4GB 0x03
2363#define PORT_SPEED_8GB 0x04 2311#define PORT_SPEED_8GB 0x04
2364 uint16_t link_data_rate; /* F/W operating speed */ 2312 uint16_t link_data_rate; /* F/W operating speed */
2365 2313
2366 uint8_t current_topology; 2314 uint8_t current_topology;
2367 uint8_t prev_topology; 2315 uint8_t prev_topology;
@@ -2370,15 +2318,69 @@ typedef struct scsi_qla_host {
2370#define ISP_CFG_FL 4 2318#define ISP_CFG_FL 4
2371#define ISP_CFG_F 8 2319#define ISP_CFG_F 8
2372 2320
2373 uint8_t operating_mode; /* F/W operating mode */ 2321 uint8_t operating_mode; /* F/W operating mode */
2374#define LOOP 0 2322#define LOOP 0
2375#define P2P 1 2323#define P2P 1
2376#define LOOP_P2P 2 2324#define LOOP_P2P 2
2377#define P2P_LOOP 3 2325#define P2P_LOOP 3
2378
2379 uint8_t marker_needed;
2380
2381 uint8_t interrupts_on; 2326 uint8_t interrupts_on;
2327 uint32_t isp_abort_cnt;
2328
2329#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2330#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2331 uint32_t device_type;
2332#define DT_ISP2100 BIT_0
2333#define DT_ISP2200 BIT_1
2334#define DT_ISP2300 BIT_2
2335#define DT_ISP2312 BIT_3
2336#define DT_ISP2322 BIT_4
2337#define DT_ISP6312 BIT_5
2338#define DT_ISP6322 BIT_6
2339#define DT_ISP2422 BIT_7
2340#define DT_ISP2432 BIT_8
2341#define DT_ISP5422 BIT_9
2342#define DT_ISP5432 BIT_10
2343#define DT_ISP2532 BIT_11
2344#define DT_ISP8432 BIT_12
2345#define DT_ISP_LAST (DT_ISP8432 << 1)
2346
2347#define DT_IIDMA BIT_26
2348#define DT_FWI2 BIT_27
2349#define DT_ZIO_SUPPORTED BIT_28
2350#define DT_OEM_001 BIT_29
2351#define DT_ISP2200A BIT_30
2352#define DT_EXTENDED_IDS BIT_31
2353#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2354#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2355#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2356#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2357#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2358#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2359#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2360#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2361#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2362#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2363#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2364#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2365#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2366#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2367
2368#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2369 IS_QLA6312(ha) || IS_QLA6322(ha))
2370#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2371#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2372#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2373#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2374#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2375 IS_QLA84XX(ha))
2376#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2377 IS_QLA25XX(ha))
2378
2379#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2380#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2381#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2382#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2383#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2382 2384
2383 /* HBA serial number */ 2385 /* HBA serial number */
2384 uint8_t serial0; 2386 uint8_t serial0;
@@ -2386,8 +2388,8 @@ typedef struct scsi_qla_host {
2386 uint8_t serial2; 2388 uint8_t serial2;
2387 2389
2388 /* NVRAM configuration data */ 2390 /* NVRAM configuration data */
2389#define MAX_NVRAM_SIZE 4096 2391#define MAX_NVRAM_SIZE 4096
2390#define VPD_OFFSET MAX_NVRAM_SIZE / 2 2392#define VPD_OFFSET MAX_NVRAM_SIZE / 2
2391 uint16_t nvram_size; 2393 uint16_t nvram_size;
2392 uint16_t nvram_base; 2394 uint16_t nvram_base;
2393 void *nvram; 2395 void *nvram;
@@ -2401,22 +2403,8 @@ typedef struct scsi_qla_host {
2401 uint16_t r_a_tov; 2403 uint16_t r_a_tov;
2402 int port_down_retry_count; 2404 int port_down_retry_count;
2403 uint8_t mbx_count; 2405 uint8_t mbx_count;
2404 uint16_t last_loop_id;
2405 uint16_t mgmt_svr_loop_id;
2406
2407 uint32_t login_retry_count;
2408 int max_q_depth;
2409
2410 struct list_head work_list;
2411
2412 /* Fibre Channel Device List. */
2413 struct list_head fcports;
2414
2415 /* RSCN queue. */
2416 uint32_t rscn_queue[MAX_RSCN_COUNT];
2417 uint8_t rscn_in_ptr;
2418 uint8_t rscn_out_ptr;
2419 2406
2407 uint32_t login_retry_count;
2420 /* SNS command interfaces. */ 2408 /* SNS command interfaces. */
2421 ms_iocb_entry_t *ms_iocb; 2409 ms_iocb_entry_t *ms_iocb;
2422 dma_addr_t ms_iocb_dma; 2410 dma_addr_t ms_iocb_dma;
@@ -2426,28 +2414,20 @@ typedef struct scsi_qla_host {
2426 struct sns_cmd_pkt *sns_cmd; 2414 struct sns_cmd_pkt *sns_cmd;
2427 dma_addr_t sns_cmd_dma; 2415 dma_addr_t sns_cmd_dma;
2428 2416
2429#define SFP_DEV_SIZE 256 2417#define SFP_DEV_SIZE 256
2430#define SFP_BLOCK_SIZE 64 2418#define SFP_BLOCK_SIZE 64
2431 void *sfp_data; 2419 void *sfp_data;
2432 dma_addr_t sfp_data_dma; 2420 dma_addr_t sfp_data_dma;
2433 2421
2434 struct task_struct *dpc_thread; 2422 struct task_struct *dpc_thread;
2435 uint8_t dpc_active; /* DPC routine is active */ 2423 uint8_t dpc_active; /* DPC routine is active */
2436 2424
2437 /* Timeout timers. */
2438 uint8_t loop_down_abort_time; /* port down timer */
2439 atomic_t loop_down_timer; /* loop down timer */
2440 uint8_t link_down_timeout; /* link down timeout */
2441
2442 uint32_t timer_active;
2443 struct timer_list timer;
2444
2445 dma_addr_t gid_list_dma; 2425 dma_addr_t gid_list_dma;
2446 struct gid_list_info *gid_list; 2426 struct gid_list_info *gid_list;
2447 int gid_list_info_size; 2427 int gid_list_info_size;
2448 2428
2449 /* Small DMA pool allocations -- maximum 256 bytes in length. */ 2429 /* Small DMA pool allocations -- maximum 256 bytes in length. */
2450#define DMA_POOL_SIZE 256 2430#define DMA_POOL_SIZE 256
2451 struct dma_pool *s_dma_pool; 2431 struct dma_pool *s_dma_pool;
2452 2432
2453 dma_addr_t init_cb_dma; 2433 dma_addr_t init_cb_dma;
@@ -2459,17 +2439,17 @@ typedef struct scsi_qla_host {
2459 2439
2460 mbx_cmd_t *mcp; 2440 mbx_cmd_t *mcp;
2461 unsigned long mbx_cmd_flags; 2441 unsigned long mbx_cmd_flags;
2462#define MBX_INTERRUPT 1 2442#define MBX_INTERRUPT 1
2463#define MBX_INTR_WAIT 2 2443#define MBX_INTR_WAIT 2
2464#define MBX_UPDATE_FLASH_ACTIVE 3 2444#define MBX_UPDATE_FLASH_ACTIVE 3
2465 2445
2466 struct mutex vport_lock; /* Virtual port synchronization */ 2446 struct mutex vport_lock; /* Virtual port synchronization */
2467 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2447 struct completion mbx_cmd_comp; /* Serialize mbx access */
2468 struct completion mbx_intr_comp; /* Used for completion notification */ 2448 struct completion mbx_intr_comp; /* Used for completion notification */
2469 2449
2470 uint32_t mbx_flags; 2450 uint32_t mbx_flags;
2471#define MBX_IN_PROGRESS BIT_0 2451#define MBX_IN_PROGRESS BIT_0
2472#define MBX_BUSY BIT_1 /* Got the Access */ 2452#define MBX_BUSY BIT_1 /* Got the Access */
2473#define MBX_SLEEPING_ON_SEM BIT_2 2453#define MBX_SLEEPING_ON_SEM BIT_2
2474#define MBX_POLLING_FOR_COMP BIT_3 2454#define MBX_POLLING_FOR_COMP BIT_3
2475#define MBX_COMPLETED BIT_4 2455#define MBX_COMPLETED BIT_4
@@ -2488,7 +2468,7 @@ typedef struct scsi_qla_host {
2488#define RISC_START_ADDRESS_2300 0x800 2468#define RISC_START_ADDRESS_2300 0x800
2489#define RISC_START_ADDRESS_2400 0x100000 2469#define RISC_START_ADDRESS_2400 0x100000
2490 2470
2491 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ 2471 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
2492 uint8_t fw_seriallink_options[4]; 2472 uint8_t fw_seriallink_options[4];
2493 uint16_t fw_seriallink_options24[4]; 2473 uint16_t fw_seriallink_options24[4];
2494 2474
@@ -2509,10 +2489,10 @@ typedef struct scsi_qla_host {
2509 uint64_t fce_wr, fce_rd; 2489 uint64_t fce_wr, fce_rd;
2510 struct mutex fce_mutex; 2490 struct mutex fce_mutex;
2511 2491
2492 uint32_t hw_event_start;
2512 uint32_t hw_event_ptr; 2493 uint32_t hw_event_ptr;
2513 uint32_t hw_event_pause_errors; 2494 uint32_t hw_event_pause_errors;
2514 2495
2515 uint8_t host_str[16];
2516 uint32_t pci_attr; 2496 uint32_t pci_attr;
2517 uint16_t chip_revision; 2497 uint16_t chip_revision;
2518 2498
@@ -2523,11 +2503,6 @@ typedef struct scsi_qla_host {
2523 char model_desc[80]; 2503 char model_desc[80];
2524 uint8_t adapter_id[16+1]; 2504 uint8_t adapter_id[16+1];
2525 2505
2526 uint8_t *node_name;
2527 uint8_t *port_name;
2528 uint8_t fabric_node_name[WWN_SIZE];
2529 uint32_t isp_abort_cnt;
2530
2531 /* Option ROM information. */ 2506 /* Option ROM information. */
2532 char *optrom_buffer; 2507 char *optrom_buffer;
2533 uint32_t optrom_size; 2508 uint32_t optrom_size;
@@ -2538,13 +2513,13 @@ typedef struct scsi_qla_host {
2538 uint32_t optrom_region_start; 2513 uint32_t optrom_region_start;
2539 uint32_t optrom_region_size; 2514 uint32_t optrom_region_size;
2540 2515
2541 /* PCI expansion ROM image information. */ 2516/* PCI expansion ROM image information. */
2542#define ROM_CODE_TYPE_BIOS 0 2517#define ROM_CODE_TYPE_BIOS 0
2543#define ROM_CODE_TYPE_FCODE 1 2518#define ROM_CODE_TYPE_FCODE 1
2544#define ROM_CODE_TYPE_EFI 3 2519#define ROM_CODE_TYPE_EFI 3
2545 uint8_t bios_revision[2]; 2520 uint8_t bios_revision[2];
2546 uint8_t efi_revision[2]; 2521 uint8_t efi_revision[2];
2547 uint8_t fcode_revision[16]; 2522 uint8_t fcode_revision[16];
2548 uint32_t fw_revision[4]; 2523 uint32_t fw_revision[4];
2549 2524
2550 uint32_t fdt_wrt_disable; 2525 uint32_t fdt_wrt_disable;
@@ -2553,39 +2528,144 @@ typedef struct scsi_qla_host {
2553 uint32_t fdt_unprotect_sec_cmd; 2528 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2529 uint32_t fdt_protect_sec_cmd;
2555 2530
2556 uint32_t flt_region_flt; 2531 uint32_t flt_region_flt;
2557 uint32_t flt_region_fdt; 2532 uint32_t flt_region_fdt;
2558 uint32_t flt_region_boot; 2533 uint32_t flt_region_boot;
2559 uint32_t flt_region_fw; 2534 uint32_t flt_region_fw;
2560 uint32_t flt_region_vpd_nvram; 2535 uint32_t flt_region_vpd_nvram;
2561 uint32_t flt_region_hw_event; 2536 uint32_t flt_region_hw_event;
2562 uint32_t flt_region_npiv_conf; 2537 uint32_t flt_region_npiv_conf;
2563 2538
2564 /* Needed for BEACON */ 2539 /* Needed for BEACON */
2565 uint16_t beacon_blink_led; 2540 uint16_t beacon_blink_led;
2566 uint8_t beacon_color_state; 2541 uint8_t beacon_color_state;
2567#define QLA_LED_GRN_ON 0x01 2542#define QLA_LED_GRN_ON 0x01
2568#define QLA_LED_YLW_ON 0x02 2543#define QLA_LED_YLW_ON 0x02
2569#define QLA_LED_ABR_ON 0x04 2544#define QLA_LED_ABR_ON 0x04
2570#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ 2545#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2571 /* ISP2322: red, green, amber. */ 2546 /* ISP2322: red, green, amber. */
2572 2547 uint16_t zio_mode;
2573 uint16_t zio_mode; 2548 uint16_t zio_timer;
2574 uint16_t zio_timer;
2575 struct fc_host_statistics fc_host_stat; 2549 struct fc_host_statistics fc_host_stat;
2576 2550
2577 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; 2551 struct qla_msix_entry *msix_entries;
2552
2553 struct list_head vp_list; /* list of VP */
2554 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
2555 sizeof(unsigned long)];
2556 uint16_t num_vhosts; /* number of vports created */
2557 uint16_t num_vsans; /* number of vsan created */
2558 uint16_t max_npiv_vports; /* 63 or 125 per topology */
2559 int cur_vport_count;
2560
2561 struct qla_chip_state_84xx *cs84xx;
2562 struct qla_statistics qla_stats;
2563 struct isp_operations *isp_ops;
2564};
2565
2566/*
2567 * Qlogic scsi host structure
2568 */
2569typedef struct scsi_qla_host {
2570 struct list_head list;
2571 struct list_head vp_fcports; /* list of fcports */
2572 struct list_head work_list;
2573 /* Commonly used flags and state information. */
2574 struct Scsi_Host *host;
2575 unsigned long host_no;
2576 uint8_t host_str[16];
2577
2578 volatile struct {
2579 uint32_t init_done :1;
2580 uint32_t online :1;
2581 uint32_t rscn_queue_overflow :1;
2582 uint32_t reset_active :1;
2583
2584 uint32_t management_server_logged_in :1;
2585 uint32_t process_response_queue :1;
2586 } flags;
2587
2588 atomic_t loop_state;
2589#define LOOP_TIMEOUT 1
2590#define LOOP_DOWN 2
2591#define LOOP_UP 3
2592#define LOOP_UPDATE 4
2593#define LOOP_READY 5
2594#define LOOP_DEAD 6
2595
2596 unsigned long dpc_flags;
2597#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2598#define RESET_ACTIVE 1
2599#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2600#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2601#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2602#define LOOP_RESYNC_ACTIVE 5
2603#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2604#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2605#define MAILBOX_RETRY 8
2606#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2607#define FAILOVER_EVENT_NEEDED 10
2608#define FAILOVER_EVENT 11
2609#define FAILOVER_NEEDED 12
2610#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2611#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2612#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2613#define ABORT_QUEUES_NEEDED 16
2614#define RELOGIN_NEEDED 17
2615#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2616#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2617#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2618#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2619#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2620#define IOCTL_ERROR_RECOVERY 23
2621#define LOOP_RESET_NEEDED 24
2622#define BEACON_BLINK_NEEDED 25
2623#define REGISTER_FDMI_NEEDED 26
2624#define FCPORT_UPDATE_NEEDED 27
2625#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2626#define UNLOADING 29
2627#define NPIV_CONFIG_NEEDED 30
2628
2629 uint32_t device_flags;
2630#define DFLG_LOCAL_DEVICES BIT_0
2631#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2632#define DFLG_FABRIC_DEVICES BIT_2
2633#define SWITCH_FOUND BIT_3
2634#define DFLG_NO_CABLE BIT_4
2635
2636 srb_t *status_srb; /* Status continuation entry. */
2637
2638 /* ISP configuration data. */
2639 uint16_t loop_id; /* Host adapter loop id */
2640
2641 port_id_t d_id; /* Host adapter port id */
2642 uint8_t marker_needed;
2643 uint16_t mgmt_svr_loop_id;
2644
2645
2646
2647 /* RSCN queue. */
2648 uint32_t rscn_queue[MAX_RSCN_COUNT];
2649 uint8_t rscn_in_ptr;
2650 uint8_t rscn_out_ptr;
2651
2652 /* Timeout timers. */
2653 uint8_t loop_down_abort_time; /* port down timer */
2654 atomic_t loop_down_timer; /* loop down timer */
2655 uint8_t link_down_timeout; /* link down timeout */
2656
2657 uint32_t timer_active;
2658 struct timer_list timer;
2659
2660 uint8_t node_name[WWN_SIZE];
2661 uint8_t port_name[WWN_SIZE];
2662 uint8_t fabric_node_name[WWN_SIZE];
2663 uint32_t vp_abort_cnt;
2578 2664
2579 struct list_head vp_list; /* list of VP */
2580 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2665 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
2581 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
2582 uint16_t num_vhosts; /* number of vports created */
2583 uint16_t num_vsans; /* number of vsan created */
2584 uint16_t vp_idx; /* vport ID */ 2666 uint16_t vp_idx; /* vport ID */
2585 2667
2586 struct scsi_qla_host *parent; /* holds pport */
2587 unsigned long vp_flags; 2668 unsigned long vp_flags;
2588 struct list_head vp_fcports; /* list of fcports */
2589#define VP_IDX_ACQUIRED 0 /* bit no 0 */ 2669#define VP_IDX_ACQUIRED 0 /* bit no 0 */
2590#define VP_CREATE_NEEDED 1 2670#define VP_CREATE_NEEDED 1
2591#define VP_BIND_NEEDED 2 2671#define VP_BIND_NEEDED 2
@@ -2604,14 +2684,10 @@ typedef struct scsi_qla_host {
2604#define VP_ERR_FAB_NORESOURCES 3 2684#define VP_ERR_FAB_NORESOURCES 3
2605#define VP_ERR_FAB_LOGOUT 4 2685#define VP_ERR_FAB_LOGOUT 4
2606#define VP_ERR_ADAP_NORESOURCES 5 2686#define VP_ERR_ADAP_NORESOURCES 5
2607 uint16_t max_npiv_vports; /* 63 or 125 per topology */ 2687 struct qla_hw_data *hw;
2608 int cur_vport_count; 2688 int req_ques[QLA_MAX_HOST_QUES];
2609
2610 struct qla_chip_state_84xx *cs84xx;
2611 struct qla_statistics qla_stats;
2612} scsi_qla_host_t; 2689} scsi_qla_host_t;
2613 2690
2614
2615/* 2691/*
2616 * Macros to help code, maintain, etc. 2692 * Macros to help code, maintain, etc.
2617 */ 2693 */
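
The qla_def.h rework above is the heart of the series: hardware-wide state (iobase, mqiobase, the qid bitmaps, isp_ops, MSI-X entries) moves into the new struct qla_hw_data, while struct scsi_qla_host shrinks to per-(v)port state plus a hw back-pointer. Two of the new constants are worth sanity-checking numerically; here is a throwaway sketch, with a made-up base address, of the ISP_QUE_REG() paging and the qid-map sizing:

#include <stdio.h>
#include <stdint.h>

#define QLA_QUE_PAGE	0x1000	/* one 4K register page per queue */
#define QLA_MAX_QUEUES	256

int main(void)
{
	uintptr_t mqiobase = 0xfebf0000;	/* made-up BAR mapping */
	unsigned int id;

	/* with ha->mqenable set, queue N's registers sit N pages in */
	for (id = 0; id < 3; id++)
		printf("queue %u regs at %#lx\n", id,
		    (unsigned long)(mqiobase + QLA_QUE_PAGE * id));

	/* req_qid_map/rsp_qid_map reserve exactly one bit per queue */
	printf("qid map: %zu unsigned longs for %u queue bits\n",
	    (QLA_MAX_QUEUES / 8) / sizeof(unsigned long), QLA_MAX_QUEUES);
	return 0;
}
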
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 561a4411719d..0e366a1b44b3 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -15,10 +15,11 @@ static atomic_t qla2x00_dfs_root_count;
15static int 15static int
16qla2x00_dfs_fce_show(struct seq_file *s, void *unused) 16qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
17{ 17{
18 scsi_qla_host_t *ha = s->private; 18 scsi_qla_host_t *vha = s->private;
19 uint32_t cnt; 19 uint32_t cnt;
20 uint32_t *fce; 20 uint32_t *fce;
21 uint64_t fce_start; 21 uint64_t fce_start;
22 struct qla_hw_data *ha = vha->hw;
22 23
23 mutex_lock(&ha->fce_mutex); 24 mutex_lock(&ha->fce_mutex);
24 25
@@ -51,7 +52,8 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
51static int 52static int
52qla2x00_dfs_fce_open(struct inode *inode, struct file *file) 53qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
53{ 54{
54 scsi_qla_host_t *ha = inode->i_private; 55 scsi_qla_host_t *vha = inode->i_private;
56 struct qla_hw_data *ha = vha->hw;
55 int rval; 57 int rval;
56 58
57 if (!ha->flags.fce_enabled) 59 if (!ha->flags.fce_enabled)
@@ -60,7 +62,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
60 mutex_lock(&ha->fce_mutex); 62 mutex_lock(&ha->fce_mutex);
61 63
62 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
63 rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
64 if (rval) 66 if (rval)
65 qla_printk(KERN_WARNING, ha, 67 qla_printk(KERN_WARNING, ha,
66 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
@@ -75,7 +77,8 @@ out:
75static int 77static int
76qla2x00_dfs_fce_release(struct inode *inode, struct file *file) 78qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
77{ 79{
78 scsi_qla_host_t *ha = inode->i_private; 80 scsi_qla_host_t *vha = inode->i_private;
81 struct qla_hw_data *ha = vha->hw;
79 int rval; 82 int rval;
80 83
81 if (ha->flags.fce_enabled) 84 if (ha->flags.fce_enabled)
@@ -86,7 +89,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
86 /* Re-enable FCE tracing. */ 89 /* Re-enable FCE tracing. */
87 ha->flags.fce_enabled = 1; 90 ha->flags.fce_enabled = 1;
88 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); 91 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
89 rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
90 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
91 if (rval) { 94 if (rval) {
92 qla_printk(KERN_WARNING, ha, 95 qla_printk(KERN_WARNING, ha,
@@ -107,8 +110,9 @@ static const struct file_operations dfs_fce_ops = {
107}; 110};
108 111
109int 112int
110qla2x00_dfs_setup(scsi_qla_host_t *ha) 113qla2x00_dfs_setup(scsi_qla_host_t *vha)
111{ 114{
115 struct qla_hw_data *ha = vha->hw;
112 if (!IS_QLA25XX(ha)) 116 if (!IS_QLA25XX(ha))
113 goto out; 117 goto out;
114 if (!ha->fce) 118 if (!ha->fce)
@@ -130,7 +134,7 @@ create_dir:
130 goto create_nodes; 134 goto create_nodes;
131 135
132 mutex_init(&ha->fce_mutex); 136 mutex_init(&ha->fce_mutex);
133 ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root); 137 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
134 if (!ha->dfs_dir) { 138 if (!ha->dfs_dir) {
135 qla_printk(KERN_NOTICE, ha, 139 qla_printk(KERN_NOTICE, ha,
136 "DebugFS: Unable to create ha directory.\n"); 140 "DebugFS: Unable to create ha directory.\n");
@@ -152,8 +156,9 @@ out:
152} 156}
153 157
154int 158int
155qla2x00_dfs_remove(scsi_qla_host_t *ha) 159qla2x00_dfs_remove(scsi_qla_host_t *vha)
156{ 160{
161 struct qla_hw_data *ha = vha->hw;
157 if (ha->dfs_fce) { 162 if (ha->dfs_fce) {
158 debugfs_remove(ha->dfs_fce); 163 debugfs_remove(ha->dfs_fce);
159 ha->dfs_fce = NULL; 164 ha->dfs_fce = NULL;
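
Every debugfs hook above follows the same two-line conversion: the callback still receives a scsi_qla_host_t, now named vha, and derives the shared hardware context via vha->hw, so the FCE buffers and their mutex are shared by all vports while identity like host_str stays per host. A toy model of that ownership split, with trimmed-down stand-in types rather than the real driver structures:

#include <stdio.h>

struct qla_hw_data_sketch {
	int fce_enabled;	/* shared across all vports */
};

struct scsi_qla_host_sketch {
	unsigned long host_no;		/* per-host identity */
	struct qla_hw_data_sketch *hw;	/* one per physical adapter */
};

static void dfs_show(struct scsi_qla_host_sketch *vha)
{
	struct qla_hw_data_sketch *ha = vha->hw;	/* the idiom */

	printf("scsi(%ld): fce %s\n", vha->host_no,
	    ha->fce_enabled ? "enabled" : "disabled");
}

int main(void)
{
	struct qla_hw_data_sketch hw = { .fce_enabled = 1 };
	struct scsi_qla_host_sketch base = { .host_no = 0, .hw = &hw };
	struct scsi_qla_host_sketch vport = { .host_no = 1, .hw = &hw };

	dfs_show(&base);	/* both hosts see the same hardware state */
	dfs_show(&vport);
	return 0;
}
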
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx {
299 uint32_t response_q_address[2]; 299 uint32_t response_q_address[2];
300 uint32_t prio_request_q_address[2]; 300 uint32_t prio_request_q_address[2];
301 301
302 uint8_t reserved_2[8]; 302 uint16_t msix;
303 uint8_t reserved_2[6];
303 304
304 uint16_t atio_q_inpointer; 305 uint16_t atio_q_inpointer;
305 uint16_t atio_q_length; 306 uint16_t atio_q_length;
@@ -372,8 +373,9 @@ struct init_cb_24xx {
372 * BIT 17-31 = Reserved 373 * BIT 17-31 = Reserved
373 */ 374 */
374 uint32_t firmware_options_3; 375 uint32_t firmware_options_3;
375 376 uint16_t qos;
376 uint8_t reserved_3[24]; 377 uint16_t rid;
378 uint8_t reserved_3[20];
377}; 379};
378 380
379/* 381/*
@@ -754,7 +756,8 @@ struct abort_entry_24xx {
754 756
755 uint32_t handle_to_abort; /* System handle to abort. */ 757 uint32_t handle_to_abort; /* System handle to abort. */
756 758
757 uint8_t reserved_1[32]; 759 uint16_t req_que_no;
760 uint8_t reserved_1[30];
758 761
759 uint8_t port_id[3]; /* PortID of destination port. */ 762 uint8_t port_id[3]; /* PortID of destination port. */
760 uint8_t vp_index; 763 uint8_t vp_index;
@@ -1258,7 +1261,8 @@ struct qla_npiv_header {
1258struct qla_npiv_entry { 1261struct qla_npiv_entry {
1259 uint16_t flags; 1262 uint16_t flags;
1260 uint16_t vf_id; 1263 uint16_t vf_id;
1261 uint16_t qos; 1264 uint8_t q_qos;
1265 uint8_t f_qos;
1262 uint16_t unused1; 1266 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE]; 1267 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE]; 1268 uint8_t node_name[WWN_SIZE];
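
The init_cb_24xx edits above carve the new msix, qos and rid fields out of existing reserved padding, so the control block's size and every later field offset must stay put (reserved_2[8] becomes msix plus reserved_2[6]; reserved_3[24] becomes qos, rid and reserved_3[20]). A standalone check of that invariant, modeling only the edited runs (C11 static_assert assumed):

#include <assert.h>
#include <stdint.h>

struct span_old { uint8_t reserved_2[8]; };
struct span_new { uint16_t msix; uint8_t reserved_2[6]; };

struct tail_old { uint8_t reserved_3[24]; };
struct tail_new { uint16_t qos; uint16_t rid; uint8_t reserved_3[20]; };

int main(void)
{
	static_assert(sizeof(struct span_old) == sizeof(struct span_new),
	    "msix must not grow the init cb");
	static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
	    "qos/rid must not grow the init cb");
	return 0;
}
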
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 753dbe6cce6e..0011e31205db 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int ql2xiidmaenable; 65extern int ql2xiidmaenable;
66extern int ql2xmaxqueues;
66 67
67extern int qla2x00_loop_reset(scsi_qla_host_t *); 68extern int qla2x00_loop_reset(scsi_qla_host_t *);
68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -72,7 +73,10 @@ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
72 uint16_t, uint16_t); 73 uint16_t, uint16_t);
73 74
74extern void qla2x00_abort_fcport_cmds(fc_port_t *); 75extern void qla2x00_abort_fcport_cmds(fc_port_t *);
75 76extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
77 struct qla_hw_data *);
78extern void qla2x00_free_host(struct scsi_qla_host *);
79extern void qla2x00_relogin(struct scsi_qla_host *);
76/* 80/*
77 * Global Functions in qla_mid.c source file. 81 * Global Functions in qla_mid.c source file.
78 */ 82 */
@@ -94,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
94extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); 98extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
95extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); 99extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
96 100
97extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 101extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
98 102
99extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); 103extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
100 104
@@ -105,10 +109,11 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
105 109
106extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 110extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
107 111
108extern void qla2xxx_wake_dpc(scsi_qla_host_t *); 112extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
109extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *); 113extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
110extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *); 114extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
111extern void qla2x00_vp_abort_isp(scsi_qla_host_t *); 115 uint16_t *);
116extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
112 117
113/* 118/*
114 * Global Function Prototypes in qla_iocb.c source file. 119 * Global Function Prototypes in qla_iocb.c source file.
@@ -119,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
119extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); 124extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
120extern int qla2x00_start_scsi(srb_t *sp); 125extern int qla2x00_start_scsi(srb_t *sp);
121extern int qla24xx_start_scsi(srb_t *sp); 126extern int qla24xx_start_scsi(srb_t *sp);
122int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 127int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
123int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 128 uint16_t, uint16_t, uint8_t);
129int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
130 uint16_t, uint16_t, uint8_t);
124 131
125/* 132/*
126 * Global Function Prototypes in qla_mbx.c source file. 133 * Global Function Prototypes in qla_mbx.c source file.
@@ -154,7 +161,7 @@ extern int
154qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 161qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
155 162
156extern int 163extern int
157qla2x00_abort_command(scsi_qla_host_t *, srb_t *); 164qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
158 165
159extern int 166extern int
160qla2x00_abort_target(struct fc_port *, unsigned int); 167qla2x00_abort_target(struct fc_port *, unsigned int);
@@ -225,7 +232,7 @@ extern int
225qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 232qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
226 dma_addr_t); 233 dma_addr_t);
227 234
228extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 235extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
229extern int qla24xx_abort_target(struct fc_port *, unsigned int); 236extern int qla24xx_abort_target(struct fc_port *, unsigned int);
230extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 237extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
231 238
@@ -264,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
264extern irqreturn_t qla2100_intr_handler(int, void *); 271extern irqreturn_t qla2100_intr_handler(int, void *);
265extern irqreturn_t qla2300_intr_handler(int, void *); 272extern irqreturn_t qla2300_intr_handler(int, void *);
266extern irqreturn_t qla24xx_intr_handler(int, void *); 273extern irqreturn_t qla24xx_intr_handler(int, void *);
267extern void qla2x00_process_response_queue(struct scsi_qla_host *); 274extern void qla2x00_process_response_queue(struct rsp_que *);
268extern void qla24xx_process_response_queue(struct scsi_qla_host *); 275extern void qla24xx_process_response_queue(struct rsp_que *);
269 276
270extern int qla2x00_request_irqs(scsi_qla_host_t *); 277extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
271extern void qla2x00_free_irqs(scsi_qla_host_t *); 278extern void qla2x00_free_irqs(scsi_qla_host_t *);
272 279
273/* 280/*
@@ -367,4 +374,27 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
367 */ 374 */
368extern int qla2x00_dfs_setup(scsi_qla_host_t *); 375extern int qla2x00_dfs_setup(scsi_qla_host_t *);
369extern int qla2x00_dfs_remove(scsi_qla_host_t *); 376extern int qla2x00_dfs_remove(scsi_qla_host_t *);
377
378/* Global function prototypes for multi-q */
379extern int qla25xx_request_irq(struct rsp_que *);
380extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *,
381 uint8_t);
382extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *,
383 uint8_t);
384extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
385 uint16_t, uint8_t, uint8_t);
386extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
387 uint16_t);
388extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
389extern void qla2x00_init_response_q_entries(struct rsp_que *);
390extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
391extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
392extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
393extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
394extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
395extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
396extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
397extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
398extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
399extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
370#endif /* _QLA_GBL_H */ 400#endif /* _QLA_GBL_H */
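
The new prototypes in qla_gbl.h outline the multi-queue lifecycle: queues are created against the shared qla_hw_data, initialized through the vhost, given a per-response-queue IRQ, and torn down per vhost. The exact call ordering lives in code outside this diff, so the pairing below is an inference from the create/init/delete naming, not a verified call sequence:

#include <stdint.h>

struct qla_hw_data;		/* opaque here; defined in qla_def.h */
struct scsi_qla_host;

extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);

/* hypothetical caller: bring up and tear down one vhost's queue set */
static int vhost_queue_cycle(struct scsi_qla_host *vha, uint8_t qos)
{
	/* the uint8_t argument is assumed to be a QoS value, per
	 * QLA_DEFAULT_QUE_QOS in the qla_def.h hunk above */
	int ret = qla25xx_create_queues(vha, qos);

	if (ret)
		return ret;
	/* ... I/O runs on the vhost's req_ques[] entries ... */
	return qla25xx_delete_queues(vha, qos);
}
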
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c2a4bfbcb05b..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -22,8 +22,9 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
22 * Returns a pointer to the @ha's ms_iocb. 22 * Returns a pointer to the @ha's ms_iocb.
23 */ 23 */
24void * 24void *
25qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 25qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
26{ 26{
27 struct qla_hw_data *ha = vha->hw;
27 ms_iocb_entry_t *ms_pkt; 28 ms_iocb_entry_t *ms_pkt;
28 29
29 ms_pkt = ha->ms_iocb; 30 ms_pkt = ha->ms_iocb;
@@ -59,8 +60,9 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
59 * Returns a pointer to the @ha's ms_iocb. 60 * Returns a pointer to the @ha's ms_iocb.
60 */ 61 */
61void * 62void *
62qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 63qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
63{ 64{
65 struct qla_hw_data *ha = vha->hw;
64 struct ct_entry_24xx *ct_pkt; 66 struct ct_entry_24xx *ct_pkt;
65 67
66 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 68 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -82,7 +84,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
82 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 84 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
83 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 85 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
84 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 86 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
85 ct_pkt->vp_index = ha->vp_idx; 87 ct_pkt->vp_index = vha->vp_idx;
86 88
87 return (ct_pkt); 89 return (ct_pkt);
88} 90}
@@ -110,16 +112,17 @@ qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size)
110} 112}
111 113
112static int 114static int
113qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt, 115qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
114 struct ct_sns_rsp *ct_rsp, const char *routine) 116 struct ct_sns_rsp *ct_rsp, const char *routine)
115{ 117{
116 int rval; 118 int rval;
117 uint16_t comp_status; 119 uint16_t comp_status;
120 struct qla_hw_data *ha = vha->hw;
118 121
119 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
120 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
121 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", 124 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
122 ha->host_no, routine, ms_pkt->entry_status)); 125 vha->host_no, routine, ms_pkt->entry_status));
123 } else { 126 } else {
124 if (IS_FWI2_CAPABLE(ha)) 127 if (IS_FWI2_CAPABLE(ha))
125 comp_status = le16_to_cpu( 128 comp_status = le16_to_cpu(
@@ -133,7 +136,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
133 if (ct_rsp->header.response != 136 if (ct_rsp->header.response !=
134 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 137 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
135 DEBUG2_3(printk("scsi(%ld): %s failed, " 138 DEBUG2_3(printk("scsi(%ld): %s failed, "
136 "rejected request:\n", ha->host_no, 139 "rejected request:\n", vha->host_no,
137 routine)); 140 routine));
138 DEBUG2_3(qla2x00_dump_buffer( 141 DEBUG2_3(qla2x00_dump_buffer(
139 (uint8_t *)&ct_rsp->header, 142 (uint8_t *)&ct_rsp->header,
@@ -144,7 +147,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
144 break; 147 break;
145 default: 148 default:
146 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 149 DEBUG2_3(printk("scsi(%ld): %s failed, completion "
147 "status (%x).\n", ha->host_no, routine, 150 "status (%x).\n", vha->host_no, routine,
148 comp_status)); 151 comp_status));
149 break; 152 break;
150 } 153 }
@@ -160,21 +163,21 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
160 * Returns 0 on success. 163 * Returns 0 on success.
161 */ 164 */
162int 165int
163qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) 166qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
164{ 167{
165 int rval; 168 int rval;
166 169
167 ms_iocb_entry_t *ms_pkt; 170 ms_iocb_entry_t *ms_pkt;
168 struct ct_sns_req *ct_req; 171 struct ct_sns_req *ct_req;
169 struct ct_sns_rsp *ct_rsp; 172 struct ct_sns_rsp *ct_rsp;
173 struct qla_hw_data *ha = vha->hw;
170 174
171 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 175 if (IS_QLA2100(ha) || IS_QLA2200(ha))
172 return (qla2x00_sns_ga_nxt(ha, fcport)); 176 return qla2x00_sns_ga_nxt(vha, fcport);
173 }
174 177
175 /* Issue GA_NXT */ 178 /* Issue GA_NXT */
176 /* Prepare common MS IOCB */ 179 /* Prepare common MS IOCB */
177 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE, 180 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
178 GA_NXT_RSP_SIZE); 181 GA_NXT_RSP_SIZE);
179 182
180 /* Prepare CT request */ 183 /* Prepare CT request */
@@ -188,13 +191,13 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GA_NXT") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -216,7 +219,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 		    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "portid=%02x%02x%02x.\n",
-		    ha->host_no,
+		    vha->host_no,
 		    fcport->node_name[0], fcport->node_name[1],
 		    fcport->node_name[2], fcport->node_name[3],
 		    fcport->node_name[4], fcport->node_name[5],
@@ -242,7 +245,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
  * Returns 0 on success.
  */
 int
-qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
 	uint16_t i;
@@ -252,16 +255,16 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
 	struct ct_sns_rsp *ct_rsp;
 
 	struct ct_sns_gid_pt_data *gid_data;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gid_pt(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gid_pt(vha, list);
 
 	gid_data = NULL;
 
 	/* Issue GID_PT */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
 	    GID_PT_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -273,13 +276,13 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
 	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GID_PT") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -320,7 +323,7 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
 	uint16_t i;
@@ -328,15 +331,15 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gpn_id(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gpn_id(vha, list);
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GPN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
 		    GPN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -350,13 +353,13 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
-			    "(%d).\n", ha->host_no, rval));
-		} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "(%d).\n", vha->host_no, rval));
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GPN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -381,23 +384,22 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
 	uint16_t i;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_gnn_id(ha, list));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gnn_id(vha, list);
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GNN_ID */
 		/* Prepare common MS IOCB */
-		ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
 		    GNN_ID_RSP_SIZE);
 
 		/* Prepare CT request */
@@ -411,13 +413,13 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
 
 		/* Execute MS IOCB */
-		rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 		    sizeof(ms_iocb_entry_t));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
-			    "(%d).\n", ha->host_no, rval));
-		} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+			    "(%d).\n", vha->host_no, rval));
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
 		    "GNN_ID") != QLA_SUCCESS) {
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -429,7 +431,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 			    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "portid=%02x%02x%02x.\n",
-			    ha->host_no,
+			    vha->host_no,
 			    list[i].node_name[0], list[i].node_name[1],
 			    list[i].node_name[2], list[i].node_name[3],
 			    list[i].node_name[4], list[i].node_name[5],
@@ -457,21 +459,20 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 int
-qla2x00_rft_id(scsi_qla_host_t *ha)
+qla2x00_rft_id(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_rft_id(ha));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rft_id(vha);
 
 	/* Issue RFT_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
 	    RFT_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -480,25 +481,25 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, FC-4 types */
-	ct_req->req.rft_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rft_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rft_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
 
 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFT_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -511,23 +512,23 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_rff_id(scsi_qla_host_t *ha)
+qla2x00_rff_id(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
-		    "ISP2100/ISP2200.\n", ha->host_no));
+		    "ISP2100/ISP2200.\n", vha->host_no));
 		return (QLA_SUCCESS);
 	}
 
 	/* Issue RFF_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
 	    RFF_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -536,26 +537,26 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
-	ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rff_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
 
 	ct_req->req.rff_id.fc4_feature = BIT_1;
 	ct_req->req.rff_id.fc4_type = 0x08;		/* SCSI - FCP */
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -568,21 +569,20 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 int
-qla2x00_rnn_id(scsi_qla_host_t *ha)
+qla2x00_rnn_id(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		return (qla2x00_sns_rnn_id(ha));
-	}
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rnn_id(vha);
 
 	/* Issue RNN_ID */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
 	    RNN_ID_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -591,33 +591,34 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id, node_name */
-	ct_req->req.rnn_id.port_id[0] = ha->d_id.b.domain;
-	ct_req->req.rnn_id.port_id[1] = ha->d_id.b.area;
-	ct_req->req.rnn_id.port_id[2] = ha->d_id.b.al_pa;
+	ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
 
-	memcpy(ct_req->req.rnn_id.node_name, ha->node_name, WWN_SIZE);
+	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RNN_ID") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
 }
 
 void
-qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
 {
+	struct qla_hw_data *ha = vha->hw;
 	sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
 	    ha->fw_major_version, ha->fw_minor_version,
 	    ha->fw_subminor_version, qla2x00_version_str);
@@ -630,23 +631,24 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
  * Returns 0 on success.
  */
 int
-qla2x00_rsnn_nn(scsi_qla_host_t *ha)
+qla2x00_rsnn_nn(scsi_qla_host_t *vha)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
-		    "ISP2100/ISP2200.\n", ha->host_no));
+		    "ISP2100/ISP2200.\n", vha->host_no));
 		return (QLA_SUCCESS);
 	}
 
 	/* Issue RSNN_NN */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -654,10 +656,10 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- node_name, symbolic node_name, size */
-	memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE);
+	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
 
 	/* Prepare the Symbolic Node Name */
-	qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name);
+	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
 
 	/* Calculate SNN length */
 	ct_req->req.rsnn_nn.name_len =
@@ -669,18 +671,18 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
 	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RSNN_NN") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -696,11 +698,12 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
  * Returns a pointer to the @ha's sns_cmd.
  */
 static inline struct sns_cmd_pkt *
-qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
+qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
     uint16_t data_size)
 {
 	uint16_t wc;
 	struct sns_cmd_pkt *sns_cmd;
+	struct qla_hw_data *ha = vha->hw;
 
 	sns_cmd = ha->sns_cmd;
 	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
@@ -726,15 +729,15 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt *sns_cmd;
 
 	/* Issue GA_NXT. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
 	    GA_NXT_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id. */
@@ -743,16 +746,16 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
 	    sns_cmd->p.gan_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
-		    "ga_nxt_rsp:\n", ha->host_no));
+		    "ga_nxt_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -772,7 +775,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
 		    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 		    "portid=%02x%02x%02x.\n",
-		    ha->host_no,
+		    vha->host_no,
 		    fcport->node_name[0], fcport->node_name[1],
 		    fcport->node_name[2], fcport->node_name[3],
 		    fcport->node_name[4], fcport->node_name[5],
@@ -800,33 +803,33 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t i;
 	uint8_t *entry;
 	struct sns_cmd_pkt *sns_cmd;
 
 	/* Issue GID_PT. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
 	    GID_PT_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_type. */
 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
 	    sns_cmd->p.gid_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
-		    "gid_rsp:\n", ha->host_no));
+		    "gid_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
@@ -867,17 +870,17 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t i;
 	struct sns_cmd_pkt *sns_cmd;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GPN_ID */
 		/* Prepare SNS command request. */
-		sns_cmd = qla2x00_prep_sns_cmd(ha, GPN_ID_CMD,
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
 		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
 
 		/* Prepare SNS command arguments -- port_id. */
@@ -886,16 +889,16 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
 
 		/* Execute SNS command. */
-		rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
-			    "(%d).\n", ha->host_no, rval));
+			    "(%d).\n", vha->host_no, rval));
 		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
 		    sns_cmd->p.gpn_data[9] != 0x02) {
 			DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
-			    "request, gpn_rsp:\n", ha->host_no));
+			    "request, gpn_rsp:\n", vha->host_no));
 			DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -922,17 +925,17 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	uint16_t i;
 	struct sns_cmd_pkt *sns_cmd;
 
 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
 		/* Issue GNN_ID */
 		/* Prepare SNS command request. */
-		sns_cmd = qla2x00_prep_sns_cmd(ha, GNN_ID_CMD,
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
 		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
 
 		/* Prepare SNS command arguments -- port_id. */
@@ -941,16 +944,16 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
 
 		/* Execute SNS command. */
-		rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
 		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
 		if (rval != QLA_SUCCESS) {
 			/*EMPTY*/
 			DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
-			    "(%d).\n", ha->host_no, rval));
+			    "(%d).\n", vha->host_no, rval));
 		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
 		    sns_cmd->p.gnn_data[9] != 0x02) {
 			DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
-			    "request, gnn_rsp:\n", ha->host_no));
+			    "request, gnn_rsp:\n", vha->host_no));
 			DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -962,7 +965,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
 			    "nn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "pn %02x%02x%02x%02x%02x%02x%02x%02x "
 			    "portid=%02x%02x%02x.\n",
-			    ha->host_no,
+			    vha->host_no,
 			    list[i].node_name[0], list[i].node_name[1],
 			    list[i].node_name[2], list[i].node_name[3],
 			    list[i].node_name[4], list[i].node_name[5],
@@ -992,40 +995,40 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_rft_id(scsi_qla_host_t *ha)
+qla2x00_sns_rft_id(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt *sns_cmd;
 
 	/* Issue RFT_ID. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
 	    RFT_ID_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id, FC-4 types */
-	sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
-	sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
-	sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
 
 	sns_cmd->p.cmd.param[5] = 0x01;			/* FCP-3 */
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
 	    sns_cmd->p.rft_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
-		    "rft_rsp:\n", ha->host_no));
+		    "rft_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1041,47 +1044,47 @@ qla2x00_sns_rft_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
+qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	struct sns_cmd_pkt *sns_cmd;
 
 	/* Issue RNN_ID. */
 	/* Prepare SNS command request. */
-	sns_cmd = qla2x00_prep_sns_cmd(ha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
 	    RNN_ID_SNS_DATA_SIZE);
 
 	/* Prepare SNS command arguments -- port_id, nodename. */
-	sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
-	sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
-	sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
 
-	sns_cmd->p.cmd.param[4] = ha->node_name[7];
-	sns_cmd->p.cmd.param[5] = ha->node_name[6];
-	sns_cmd->p.cmd.param[6] = ha->node_name[5];
-	sns_cmd->p.cmd.param[7] = ha->node_name[4];
-	sns_cmd->p.cmd.param[8] = ha->node_name[3];
-	sns_cmd->p.cmd.param[9] = ha->node_name[2];
-	sns_cmd->p.cmd.param[10] = ha->node_name[1];
-	sns_cmd->p.cmd.param[11] = ha->node_name[0];
+	sns_cmd->p.cmd.param[4] = vha->node_name[7];
+	sns_cmd->p.cmd.param[5] = vha->node_name[6];
+	sns_cmd->p.cmd.param[6] = vha->node_name[5];
+	sns_cmd->p.cmd.param[7] = vha->node_name[4];
+	sns_cmd->p.cmd.param[8] = vha->node_name[3];
+	sns_cmd->p.cmd.param[9] = vha->node_name[2];
+	sns_cmd->p.cmd.param[10] = vha->node_name[1];
+	sns_cmd->p.cmd.param[11] = vha->node_name[0];
 
 	/* Execute SNS command. */
-	rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
 	    sizeof(struct sns_cmd_pkt));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
 	    sns_cmd->p.rnn_data[9] != 0x02) {
 		DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
-		    "rnn_rsp:\n", ha->host_no));
+		    "rnn_rsp:\n", vha->host_no));
 		DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return (rval);
@@ -1094,25 +1097,25 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
+qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
 {
 	int ret;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
-
+	struct qla_hw_data *ha = vha->hw;
 	ret = QLA_SUCCESS;
-	if (ha->flags.management_server_logged_in)
+	if (vha->flags.management_server_logged_in)
 		return ret;
 
-	ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
+	ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
 	    mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
 		    "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
-		    __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
+		    __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
 		    mb[2], mb[6], mb[7]));
 		ret = QLA_FUNCTION_FAILED;
 	} else
-		ha->flags.management_server_logged_in = 1;
+		vha->flags.management_server_logged_in = 1;
 
 	return ret;
 }
@@ -1126,17 +1129,17 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
     uint32_t rsp_size)
 {
 	ms_iocb_entry_t *ms_pkt;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_pkt = ha->ms_iocb;
 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
 
 	ms_pkt->entry_type = MS_IOCB_TYPE;
 	ms_pkt->entry_count = 1;
-	SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
+	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
 	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
@@ -1164,17 +1167,18 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
  * Returns a pointer to the @ha's ms_iocb.
  */
 void *
-qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
     uint32_t rsp_size)
 {
 	struct ct_entry_24xx *ct_pkt;
+	struct qla_hw_data *ha = vha->hw;
 
 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
 
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
-	ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
+	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1188,14 +1192,15 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
 	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
-	ct_pkt->vp_index = ha->vp_idx;
+	ct_pkt->vp_index = vha->vp_idx;
 
 	return ct_pkt;
 }
 
 static inline ms_iocb_entry_t *
-qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
+qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
 {
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
 
@@ -1240,7 +1245,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
 {
 	int rval, alen;
 	uint32_t size, sn;
@@ -1250,11 +1255,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	struct ct_sns_rsp *ct_rsp;
 	uint8_t *entries;
 	struct ct_fdmi_hba_attr *eiter;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Issue RHBA */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1262,9 +1268,9 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- attribute block, attributes. */
-	memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
 	ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
-	memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
 	size = 2 * WWN_SIZE + 4 + 4;
 
 	/* Attributes */
@@ -1276,11 +1282,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
 	eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
-	memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
 	size += 4 + WWN_SIZE;
 
 	DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
-	    __func__, ha->host_no,
+	    __func__, vha->host_no,
 	    eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
 	    eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
 	    eiter->a.node_name[6], eiter->a.node_name[7]));
@@ -1294,7 +1300,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
 	    eiter->a.manufacturer));
 
 	/* Serial number. */
@@ -1307,7 +1313,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
 	    eiter->a.serial_num));
 
 	/* Model name. */
@@ -1319,7 +1325,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
 	    eiter->a.model));
 
 	/* Model description. */
@@ -1332,7 +1338,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
 	    eiter->a.model_desc));
 
 	/* Hardware version. */
@@ -1344,7 +1350,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.hw_version));
 
 	/* Driver version. */
@@ -1356,7 +1362,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.driver_version));
 
 	/* Option ROM version. */
@@ -1368,27 +1374,27 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.orom_version));
 
 	/* Firmware version */
 	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
-	ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
 	alen = strlen(eiter->a.fw_version);
 	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
+	DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
 	    eiter->a.fw_version));
 
 	/* Update MS request size. */
-	qla2x00_update_ms_fdmi_iocb(ha, size + 16);
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	DEBUG13(printk("%s(%ld): RHBA identifier="
 	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
-	    ha->host_no, ct_req->req.rhba.hba_identifier[0],
+	    vha->host_no, ct_req->req.rhba.hba_identifier[0],
 	    ct_req->req.rhba.hba_identifier[1],
 	    ct_req->req.rhba.hba_identifier[2],
 	    ct_req->req.rhba.hba_identifier[3],
@@ -1399,25 +1405,25 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
 	DEBUG13(qla2x00_dump_buffer(entries, size));
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
 		    ct_rsp->header.explanation_code ==
 		    CT_EXPL_ALREADY_REGISTERED) {
 			DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
-			    __func__, ha->host_no));
+			    __func__, vha->host_no));
 			rval = QLA_ALREADY_REGISTERED;
 		}
 	} else {
 		DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1430,17 +1436,17 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
 {
 	int rval;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
 
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
 	    DHBA_RSP_SIZE);
 
 	/* Prepare CT request */
@@ -1449,28 +1455,28 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- portname. */
-	memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
 
 	DEBUG13(printk("%s(%ld): DHBA portname="
-	    "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
+	    "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
 	    ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
 	    ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
 	    ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
 	    ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
 
 	/* Execute MS IOCB */
-	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
-		    ha->host_no, rval));
-	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
+		    vha->host_no, rval));
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1483,11 +1489,11 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 {
 	int rval, alen;
 	uint32_t size, max_frame_size;
-
+	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
@@ -1498,7 +1504,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
 	/* Request size adjusted after CT preparation */
-	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
 
 	/* Prepare CT request */
 	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1506,7 +1512,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1506 ct_rsp = &ha->ct_sns->p.rsp; 1512 ct_rsp = &ha->ct_sns->p.rsp;
1507 1513
1508 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1514 /* Prepare FDMI command arguments -- attribute block, attributes. */
1509 memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE); 1515 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1510 size = WWN_SIZE + 4; 1516 size = WWN_SIZE + 4;
1511 1517
1512 /* Attributes */ 1518 /* Attributes */
@@ -1521,8 +1527,9 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1521 eiter->a.fc4_types[2] = 0x01; 1527 eiter->a.fc4_types[2] = 0x01;
1522 size += 4 + 32; 1528 size += 4 + 32;
1523 1529
1524 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no, 1530 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
1525 eiter->a.fc4_types[2], eiter->a.fc4_types[1])); 1531 vha->host_no, eiter->a.fc4_types[2],
1532 eiter->a.fc4_types[1]));
1526 1533
1527 /* Supported speed. */ 1534 /* Supported speed. */
1528 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1535 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1544,7 +1551,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1544 FDMI_PORT_SPEED_1GB); 1551 FDMI_PORT_SPEED_1GB);
1545 size += 4 + 4; 1552 size += 4 + 4;
1546 1553
1547 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no, 1554 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
1548 eiter->a.sup_speed)); 1555 eiter->a.sup_speed));
1549 1556
1550 /* Current speed. */ 1557 /* Current speed. */
@@ -1575,7 +1582,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1575 } 1582 }
1576 size += 4 + 4; 1583 size += 4 + 4;
1577 1584
1578 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no, 1585 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
1579 eiter->a.cur_speed)); 1586 eiter->a.cur_speed));
1580 1587
1581 /* Max frame size. */ 1588 /* Max frame size. */
@@ -1588,7 +1595,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1595 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1589 size += 4 + 4; 1596 size += 4 + 4;
1590 1597
1591 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no, 1598 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
1592 eiter->a.max_frame_size)); 1599 eiter->a.max_frame_size));
1593 1600
1594 /* OS device name. */ 1601 /* OS device name. */
@@ -1600,32 +1607,32 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1600 eiter->len = cpu_to_be16(4 + alen); 1607 eiter->len = cpu_to_be16(4 + alen);
1601 size += 4 + alen; 1608 size += 4 + alen;
1602 1609
1603 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no, 1610 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
1604 eiter->a.os_dev_name)); 1611 eiter->a.os_dev_name));
1605 1612
1606 /* Hostname. */ 1613 /* Hostname. */
1607 if (strlen(fc_host_system_hostname(ha->host))) { 1614 if (strlen(fc_host_system_hostname(vha->host))) {
1608 ct_req->req.rpa.attrs.count = 1615 ct_req->req.rpa.attrs.count =
1609 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT); 1616 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1610 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1617 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1611 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME); 1618 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
1612 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), 1619 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1613 "%s", fc_host_system_hostname(ha->host)); 1620 "%s", fc_host_system_hostname(vha->host));
1614 alen = strlen(eiter->a.host_name); 1621 alen = strlen(eiter->a.host_name);
1615 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1622 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1616 eiter->len = cpu_to_be16(4 + alen); 1623 eiter->len = cpu_to_be16(4 + alen);
1617 size += 4 + alen; 1624 size += 4 + alen;
1618 1625
1619 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1626 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
1620 ha->host_no, eiter->a.host_name)); 1627 vha->host_no, eiter->a.host_name));
1621 } 1628 }
1622 1629
1623 /* Update MS request size. */ 1630 /* Update MS request size. */
1624 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1631 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1625 1632
1626 DEBUG13(printk("%s(%ld): RPA portname=" 1633 DEBUG13(printk("%s(%ld): RPA portname="
1627 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1634 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1628 ha->host_no, ct_req->req.rpa.port_name[0], 1635 vha->host_no, ct_req->req.rpa.port_name[0],
1629 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1636 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
1630 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1637 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
1631 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1638 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
@@ -1633,18 +1640,18 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1633 DEBUG13(qla2x00_dump_buffer(entries, size)); 1640 DEBUG13(qla2x00_dump_buffer(entries, size));
1634 1641
1635 /* Execute MS IOCB */ 1642 /* Execute MS IOCB */
1636 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1643 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1637 sizeof(ms_iocb_entry_t)); 1644 sizeof(ms_iocb_entry_t));
1638 if (rval != QLA_SUCCESS) { 1645 if (rval != QLA_SUCCESS) {
1639 /*EMPTY*/ 1646 /*EMPTY*/
1640 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1647 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
1641 ha->host_no, rval)); 1648 vha->host_no, rval));
1642 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") != 1649 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1643 QLA_SUCCESS) { 1650 QLA_SUCCESS) {
1644 rval = QLA_FUNCTION_FAILED; 1651 rval = QLA_FUNCTION_FAILED;
1645 } else { 1652 } else {
1646 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1653 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
1647 ha->host_no)); 1654 vha->host_no));
1648 } 1655 }
1649 1656
1650 return rval; 1657 return rval;
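
Each RPA attribute above is packed as a big-endian (type, len, value) triple whose length covers the 4-byte header plus the value rounded up to a 4-byte boundary; a string whose length is already a multiple of four still gains four pad bytes, so it stays NUL-terminated. A stand-alone sketch of that packing idiom, outside the driver:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* pack one string attribute; returns bytes consumed ("size += 4 + alen") */
static size_t pack_string_attr(uint8_t *buf, uint16_t type, const char *s)
{
	size_t alen = strlen(s);

	/* round up to a 4-byte boundary; a multiple-of-4 string still
	 * gets 4 pad bytes so the value stays NUL-terminated */
	alen += (alen & 3) ? (4 - (alen & 3)) : 4;

	buf[0] = type >> 8;                          /* be16 type */
	buf[1] = type & 0xff;
	buf[2] = (4 + alen) >> 8;                    /* be16 len = hdr + value */
	buf[3] = (4 + alen) & 0xff;
	memset(buf + 4, 0, alen);
	memcpy(buf + 4, s, strlen(s));
	return 4 + alen;
}

int main(void)
{
	uint8_t entries[64];
	size_t size = pack_string_attr(entries, 0x0103, "qla2xxx");

	printf("entry size=%zu\n", size);            /* 4 + 8 = 12 */
	return 0;
}
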
@@ -1657,34 +1664,28 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1657 * Returns 0 on success. 1664 * Returns 0 on success.
1658 */ 1665 */
1659int 1666int
1660qla2x00_fdmi_register(scsi_qla_host_t *ha) 1667qla2x00_fdmi_register(scsi_qla_host_t *vha)
1661{ 1668{
1662 int rval; 1669 int rval;
1663 1670
1664 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1671 rval = qla2x00_mgmt_svr_login(vha);
1665 DEBUG2(printk("scsi(%ld): FDMI unsupported on "
1666 "ISP2100/ISP2200.\n", ha->host_no));
1667 return QLA_SUCCESS;
1668 }
1669
1670 rval = qla2x00_mgmt_svr_login(ha);
1671 if (rval) 1672 if (rval)
1672 return rval; 1673 return rval;
1673 1674
1674 rval = qla2x00_fdmi_rhba(ha); 1675 rval = qla2x00_fdmi_rhba(vha);
1675 if (rval) { 1676 if (rval) {
1676 if (rval != QLA_ALREADY_REGISTERED) 1677 if (rval != QLA_ALREADY_REGISTERED)
1677 return rval; 1678 return rval;
1678 1679
1679 rval = qla2x00_fdmi_dhba(ha); 1680 rval = qla2x00_fdmi_dhba(vha);
1680 if (rval) 1681 if (rval)
1681 return rval; 1682 return rval;
1682 1683
1683 rval = qla2x00_fdmi_rhba(ha); 1684 rval = qla2x00_fdmi_rhba(vha);
1684 if (rval) 1685 if (rval)
1685 return rval; 1686 return rval;
1686 } 1687 }
1687 rval = qla2x00_fdmi_rpa(ha); 1688 rval = qla2x00_fdmi_rpa(vha);
1688 1689
1689 return rval; 1690 return rval;
1690} 1691}
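
Note the control flow that survives the conversion: RHBA first, and only when the fabric reports the HBA as already registered does the code deregister (DHBA) and register again before pushing port attributes (RPA). A stand-alone sketch of that retry flow, with stubbed return codes standing in for the mailbox results:

#include <stdio.h>

enum { QLA_SUCCESS = 0, QLA_ALREADY_REGISTERED = 1, QLA_FUNCTION_FAILED = 2 };

static int calls;
/* pretend the first RHBA hits a stale registration */
static int rhba(void) { return calls++ ? QLA_SUCCESS : QLA_ALREADY_REGISTERED; }
static int dhba(void) { return QLA_SUCCESS; }
static int rpa(void)  { return QLA_SUCCESS; }

static int fdmi_register(void)
{
	int rval = rhba();

	if (rval) {
		if (rval != QLA_ALREADY_REGISTERED)
			return rval;        /* hard failure: give up */
		if ((rval = dhba()))        /* deregister the stale entry */
			return rval;
		if ((rval = rhba()))        /* and register once more */
			return rval;
	}
	return rpa();
}

int main(void)
{
	int rval = fdmi_register();

	printf("rval=%d after %d RHBA calls\n", rval, calls);
	return 0;
}
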
@@ -1697,11 +1698,11 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1697 * Returns 0 on success. 1698 * Returns 0 on success.
1698 */ 1699 */
1699int 1700int
1700qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list) 1701qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1701{ 1702{
1702 int rval; 1703 int rval;
1703 uint16_t i; 1704 uint16_t i;
1704 1705 struct qla_hw_data *ha = vha->hw;
1705 ms_iocb_entry_t *ms_pkt; 1706 ms_iocb_entry_t *ms_pkt;
1706 struct ct_sns_req *ct_req; 1707 struct ct_sns_req *ct_req;
1707 struct ct_sns_rsp *ct_rsp; 1708 struct ct_sns_rsp *ct_rsp;
@@ -1712,7 +1713,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1712 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1713 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1713 /* Issue GFPN_ID */ 1714 /* Issue GFPN_ID */
1714 /* Prepare common MS IOCB */ 1715 /* Prepare common MS IOCB */
1715 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE, 1716 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
1716 GFPN_ID_RSP_SIZE); 1717 GFPN_ID_RSP_SIZE);
1717 1718
1718 /* Prepare CT request */ 1719 /* Prepare CT request */
@@ -1726,13 +1727,13 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1726 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 1727 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
1727 1728
1728 /* Execute MS IOCB */ 1729 /* Execute MS IOCB */
1729 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1730 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1730 sizeof(ms_iocb_entry_t)); 1731 sizeof(ms_iocb_entry_t));
1731 if (rval != QLA_SUCCESS) { 1732 if (rval != QLA_SUCCESS) {
1732 /*EMPTY*/ 1733 /*EMPTY*/
1733 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1734 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
1734 "failed (%d).\n", ha->host_no, rval)); 1735 "failed (%d).\n", vha->host_no, rval));
1735 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1736 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1736 "GFPN_ID") != QLA_SUCCESS) { 1737 "GFPN_ID") != QLA_SUCCESS) {
1737 rval = QLA_FUNCTION_FAILED; 1738 rval = QLA_FUNCTION_FAILED;
1738 } else { 1739 } else {
@@ -1750,17 +1751,17 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1750} 1751}
1751 1752
1752static inline void * 1753static inline void *
1753qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1754qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1754 uint32_t rsp_size) 1755 uint32_t rsp_size)
1755{ 1756{
1756 struct ct_entry_24xx *ct_pkt; 1757 struct ct_entry_24xx *ct_pkt;
1757 1758 struct qla_hw_data *ha = vha->hw;
1758 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1759 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1759 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1760 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1760 1761
1761 ct_pkt->entry_type = CT_IOCB_TYPE; 1762 ct_pkt->entry_type = CT_IOCB_TYPE;
1762 ct_pkt->entry_count = 1; 1763 ct_pkt->entry_count = 1;
1763 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1764 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1764 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1765 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1765 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1766 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1766 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1767 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1774,7 +1775,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1774 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1775 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1775 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1776 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1776 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1777 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1777 ct_pkt->vp_index = ha->vp_idx; 1778 ct_pkt->vp_index = vha->vp_idx;
1778 1779
1779 return ct_pkt; 1780 return ct_pkt;
1780} 1781}
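
The dseg_*_address pair above stores one 64-bit DMA address as two little-endian 32-bit words via LSD()/MSD(). A stand-alone sketch of the split (macros mirror the driver's intent; the address is an example value):

#include <stdio.h>
#include <stdint.h>

#define LSD(x) ((uint32_t)((uint64_t)(x) & 0xffffffff))   /* low 32 bits */
#define MSD(x) ((uint32_t)(((uint64_t)(x)) >> 32))        /* high 32 bits */

int main(void)
{
	uint64_t ct_sns_dma = 0x0000000123456000ULL;      /* example bus address */
	uint32_t dseg[2] = { LSD(ct_sns_dma), MSD(ct_sns_dma) };

	printf("dseg_0_address = %08x %08x\n",
	    (unsigned)dseg[0], (unsigned)dseg[1]);
	return 0;
}
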
@@ -1803,11 +1804,11 @@ qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
1803 * Returns 0 on success. 1804 * Returns 0 on success.
1804 */ 1805 */
1805int 1806int
1806qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list) 1807qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1807{ 1808{
1808 int rval; 1809 int rval;
1809 uint16_t i; 1810 uint16_t i;
1810 1811 struct qla_hw_data *ha = vha->hw;
1811 ms_iocb_entry_t *ms_pkt; 1812 ms_iocb_entry_t *ms_pkt;
1812 struct ct_sns_req *ct_req; 1813 struct ct_sns_req *ct_req;
1813 struct ct_sns_rsp *ct_rsp; 1814 struct ct_sns_rsp *ct_rsp;
@@ -1817,14 +1818,14 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1817 if (!ha->flags.gpsc_supported) 1818 if (!ha->flags.gpsc_supported)
1818 return QLA_FUNCTION_FAILED; 1819 return QLA_FUNCTION_FAILED;
1819 1820
1820 rval = qla2x00_mgmt_svr_login(ha); 1821 rval = qla2x00_mgmt_svr_login(vha);
1821 if (rval) 1822 if (rval)
1822 return rval; 1823 return rval;
1823 1824
1824 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1825 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1825 		/* Issue GPSC */ 1826 		/* Issue GPSC */
1826 /* Prepare common MS IOCB */ 1827 /* Prepare common MS IOCB */
1827 ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE, 1828 ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
1828 GPSC_RSP_SIZE); 1829 GPSC_RSP_SIZE);
1829 1830
1830 /* Prepare CT request */ 1831 /* Prepare CT request */
@@ -1837,13 +1838,13 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1837 WWN_SIZE); 1838 WWN_SIZE);
1838 1839
1839 /* Execute MS IOCB */ 1840 /* Execute MS IOCB */
1840 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1841 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1841 sizeof(ms_iocb_entry_t)); 1842 sizeof(ms_iocb_entry_t));
1842 if (rval != QLA_SUCCESS) { 1843 if (rval != QLA_SUCCESS) {
1843 /*EMPTY*/ 1844 /*EMPTY*/
1844 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1845 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
1845 "failed (%d).\n", ha->host_no, rval)); 1846 "failed (%d).\n", vha->host_no, rval));
1846 } else if ((rval = qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1847 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1847 "GPSC")) != QLA_SUCCESS) { 1848 "GPSC")) != QLA_SUCCESS) {
1848 /* FM command unsupported? */ 1849 /* FM command unsupported? */
1849 if (rval == QLA_INVALID_COMMAND && 1850 if (rval == QLA_INVALID_COMMAND &&
@@ -1853,7 +1854,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1853 CT_REASON_COMMAND_UNSUPPORTED)) { 1854 CT_REASON_COMMAND_UNSUPPORTED)) {
1854 DEBUG2(printk("scsi(%ld): GPSC command " 1855 DEBUG2(printk("scsi(%ld): GPSC command "
1855 "unsupported, disabling query...\n", 1856 "unsupported, disabling query...\n",
1856 ha->host_no)); 1857 vha->host_no));
1857 ha->flags.gpsc_supported = 0; 1858 ha->flags.gpsc_supported = 0;
1858 rval = QLA_FUNCTION_FAILED; 1859 rval = QLA_FUNCTION_FAILED;
1859 break; 1860 break;
@@ -1878,7 +1879,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1878 1879
1879 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1880 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
1880 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1881 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1881 "speed=%04x.\n", ha->host_no, 1882 "speed=%04x.\n", vha->host_no,
1882 list[i].fabric_port_name[0], 1883 list[i].fabric_port_name[0],
1883 list[i].fabric_port_name[1], 1884 list[i].fabric_port_name[1],
1884 list[i].fabric_port_name[2], 1885 list[i].fabric_port_name[2],
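
The GPSC path above also shows the sticky-capability idiom: one CT_REASON_COMMAND_UNSUPPORTED reply clears ha->flags.gpsc_supported, so later rescans fail fast instead of re-querying the switch. A stand-alone sketch of the idiom (flag and return codes are stand-ins):

#include <stdio.h>

struct hw { unsigned gpsc_supported : 1; };

static int issue_gpsc(void) { return -1; }      /* pretend the switch rejects it */

static int gpsc_query(struct hw *ha)
{
	if (!ha->gpsc_supported)
		return -1;              /* QLA_FUNCTION_FAILED, no CT traffic */
	if (issue_gpsc() < 0) {
		ha->gpsc_supported = 0; /* never ask this fabric again */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct hw ha = { .gpsc_supported = 1 };

	gpsc_query(&ha);                            /* first query trips the flag */
	printf("second query rval=%d\n", gpsc_query(&ha));  /* fails fast */
	return 0;
}
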
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4218f20f5ed5..52ed56ecf195 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -21,7 +22,6 @@
21static int qla2x00_isp_firmware(scsi_qla_host_t *); 22static int qla2x00_isp_firmware(scsi_qla_host_t *);
22static void qla2x00_resize_request_q(scsi_qla_host_t *); 23static void qla2x00_resize_request_q(scsi_qla_host_t *);
23static int qla2x00_setup_chip(scsi_qla_host_t *); 24static int qla2x00_setup_chip(scsi_qla_host_t *);
24static void qla2x00_init_response_q_entries(scsi_qla_host_t *);
25static int qla2x00_init_rings(scsi_qla_host_t *); 25static int qla2x00_init_rings(scsi_qla_host_t *);
26static int qla2x00_fw_ready(scsi_qla_host_t *); 26static int qla2x00_fw_ready(scsi_qla_host_t *);
27static int qla2x00_configure_hba(scsi_qla_host_t *); 27static int qla2x00_configure_hba(scsi_qla_host_t *);
@@ -35,10 +35,11 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); 38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39 39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 41static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *);
42 43
43/****************************************************************************/ 44/****************************************************************************/
44/* QLogic ISP2x00 Hardware Support Functions. */ 45/* QLogic ISP2x00 Hardware Support Functions. */
@@ -55,77 +56,81 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
55* 0 = success 56* 0 = success
56*/ 57*/
57int 58int
58qla2x00_initialize_adapter(scsi_qla_host_t *ha) 59qla2x00_initialize_adapter(scsi_qla_host_t *vha)
59{ 60{
60 int rval; 61 int rval;
61 62 struct qla_hw_data *ha = vha->hw;
63 struct req_que *req = ha->req_q_map[0];
62 /* Clear adapter flags. */ 64 /* Clear adapter flags. */
63 ha->flags.online = 0; 65 vha->flags.online = 0;
64 ha->flags.reset_active = 0; 66 vha->flags.reset_active = 0;
65 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 67 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
66 atomic_set(&ha->loop_state, LOOP_DOWN); 68 atomic_set(&vha->loop_state, LOOP_DOWN);
67 ha->device_flags = DFLG_NO_CABLE; 69 vha->device_flags = DFLG_NO_CABLE;
68 ha->dpc_flags = 0; 70 vha->dpc_flags = 0;
69 ha->flags.management_server_logged_in = 0; 71 vha->flags.management_server_logged_in = 0;
70 ha->marker_needed = 0; 72 vha->marker_needed = 0;
71 ha->mbx_flags = 0; 73 ha->mbx_flags = 0;
72 ha->isp_abort_cnt = 0; 74 ha->isp_abort_cnt = 0;
73 ha->beacon_blink_led = 0; 75 ha->beacon_blink_led = 0;
74 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 76 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
77
78 set_bit(0, ha->req_qid_map);
79 set_bit(0, ha->rsp_qid_map);
75 80
76 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
77 rval = ha->isp_ops->pci_config(ha); 82 rval = ha->isp_ops->pci_config(vha);
78 if (rval) { 83 if (rval) {
79 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
80 ha->host_no)); 85 vha->host_no));
81 return (rval); 86 return (rval);
82 } 87 }
83 88
84 ha->isp_ops->reset_chip(ha); 89 ha->isp_ops->reset_chip(vha);
85 90
86 rval = qla2xxx_get_flash_info(ha); 91 rval = qla2xxx_get_flash_info(vha);
87 if (rval) { 92 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 93 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no)); 94 vha->host_no));
90 return (rval); 95 return (rval);
91 } 96 }
92 97
93 ha->isp_ops->get_flash_version(ha, ha->request_ring); 98 ha->isp_ops->get_flash_version(vha, req->ring);
94 99
95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 100 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
96 101
97 ha->isp_ops->nvram_config(ha); 102 ha->isp_ops->nvram_config(vha);
98 103
99 if (ha->flags.disable_serdes) { 104 if (ha->flags.disable_serdes) {
100 /* Mask HBA via NVRAM settings? */ 105 /* Mask HBA via NVRAM settings? */
101 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 106 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
102 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 107 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
103 ha->port_name[0], ha->port_name[1], 108 vha->port_name[0], vha->port_name[1],
104 ha->port_name[2], ha->port_name[3], 109 vha->port_name[2], vha->port_name[3],
105 ha->port_name[4], ha->port_name[5], 110 vha->port_name[4], vha->port_name[5],
106 ha->port_name[6], ha->port_name[7]); 111 vha->port_name[6], vha->port_name[7]);
107 return QLA_FUNCTION_FAILED; 112 return QLA_FUNCTION_FAILED;
108 } 113 }
109 114
110 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 115 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
111 116
112 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) { 117 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
113 rval = ha->isp_ops->chip_diag(ha); 118 rval = ha->isp_ops->chip_diag(vha);
114 if (rval) 119 if (rval)
115 return (rval); 120 return (rval);
116 rval = qla2x00_setup_chip(ha); 121 rval = qla2x00_setup_chip(vha);
117 if (rval) 122 if (rval)
118 return (rval); 123 return (rval);
119 } 124 }
120 if (IS_QLA84XX(ha)) { 125 if (IS_QLA84XX(ha)) {
121 ha->cs84xx = qla84xx_get_chip(ha); 126 ha->cs84xx = qla84xx_get_chip(vha);
122 if (!ha->cs84xx) { 127 if (!ha->cs84xx) {
123 qla_printk(KERN_ERR, ha, 128 qla_printk(KERN_ERR, ha,
124 "Unable to configure ISP84XX.\n"); 129 "Unable to configure ISP84XX.\n");
125 return QLA_FUNCTION_FAILED; 130 return QLA_FUNCTION_FAILED;
126 } 131 }
127 } 132 }
128 rval = qla2x00_init_rings(ha); 133 rval = qla2x00_init_rings(vha);
129 134
130 return (rval); 135 return (rval);
131} 136}
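
qla2x00_initialize_adapter() now seeds the multiqueue bookkeeping: req_q_map[0]/rsp_q_map[0] are the default queue pair, and set_bit(0, ...) marks id 0 busy in the qid bitmaps so later queue allocations skip it. A stand-alone sketch of that allocation scheme (a plain unsigned long stands in for the kernel bitmap):

#include <stdio.h>
#include <string.h>

#define QLA_MAX_QUEUES 32

struct req_que { int id; int length; };

struct qla_hw_data {                            /* stand-in */
	struct req_que *req_q_map[QLA_MAX_QUEUES];
	unsigned long req_qid_map;              /* bit n set => queue id n in use */
};

static int alloc_req_que(struct qla_hw_data *ha, struct req_que *q)
{
	int id;

	for (id = 0; id < QLA_MAX_QUEUES; id++)
		if (!(ha->req_qid_map & (1UL << id)))
			break;
	if (id == QLA_MAX_QUEUES)
		return -1;                      /* no free queue id */
	ha->req_qid_map |= 1UL << id;
	q->id = id;
	ha->req_q_map[id] = q;
	return id;
}

int main(void)
{
	struct qla_hw_data ha;
	struct req_que base = { .length = 512 }, extra = { .length = 512 };

	memset(&ha, 0, sizeof(ha));
	alloc_req_que(&ha, &base);              /* takes id 0, like set_bit(0, ...) */
	printf("next queue id=%d\n", alloc_req_que(&ha, &extra));   /* 1 */
	return 0;
}
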
@@ -137,10 +142,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
137 * Returns 0 on success. 142 * Returns 0 on success.
138 */ 143 */
139int 144int
140qla2100_pci_config(scsi_qla_host_t *ha) 145qla2100_pci_config(scsi_qla_host_t *vha)
141{ 146{
142 uint16_t w; 147 uint16_t w;
143 unsigned long flags; 148 unsigned long flags;
149 struct qla_hw_data *ha = vha->hw;
144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 150 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
145 151
146 pci_set_master(ha->pdev); 152 pci_set_master(ha->pdev);
@@ -167,11 +173,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
167 * Returns 0 on success. 173 * Returns 0 on success.
168 */ 174 */
169int 175int
170qla2300_pci_config(scsi_qla_host_t *ha) 176qla2300_pci_config(scsi_qla_host_t *vha)
171{ 177{
172 uint16_t w; 178 uint16_t w;
173 unsigned long flags = 0; 179 unsigned long flags = 0;
174 uint32_t cnt; 180 uint32_t cnt;
181 struct qla_hw_data *ha = vha->hw;
175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 182 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
176 183
177 pci_set_master(ha->pdev); 184 pci_set_master(ha->pdev);
@@ -248,10 +255,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
248 * Returns 0 on success. 255 * Returns 0 on success.
249 */ 256 */
250int 257int
251qla24xx_pci_config(scsi_qla_host_t *ha) 258qla24xx_pci_config(scsi_qla_host_t *vha)
252{ 259{
253 uint16_t w; 260 uint16_t w;
254 unsigned long flags = 0; 261 unsigned long flags = 0;
262 struct qla_hw_data *ha = vha->hw;
255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 263 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
256 264
257 pci_set_master(ha->pdev); 265 pci_set_master(ha->pdev);
@@ -291,9 +299,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
291 * Returns 0 on success. 299 * Returns 0 on success.
292 */ 300 */
293int 301int
294qla25xx_pci_config(scsi_qla_host_t *ha) 302qla25xx_pci_config(scsi_qla_host_t *vha)
295{ 303{
296 uint16_t w; 304 uint16_t w;
305 struct qla_hw_data *ha = vha->hw;
297 306
298 pci_set_master(ha->pdev); 307 pci_set_master(ha->pdev);
299 pci_try_set_mwi(ha->pdev); 308 pci_try_set_mwi(ha->pdev);
@@ -321,32 +330,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
321 * Returns 0 on success. 330 * Returns 0 on success.
322 */ 331 */
323static int 332static int
324qla2x00_isp_firmware(scsi_qla_host_t *ha) 333qla2x00_isp_firmware(scsi_qla_host_t *vha)
325{ 334{
326 int rval; 335 int rval;
327 uint16_t loop_id, topo, sw_cap; 336 uint16_t loop_id, topo, sw_cap;
328 uint8_t domain, area, al_pa; 337 uint8_t domain, area, al_pa;
338 struct qla_hw_data *ha = vha->hw;
329 339
330 /* Assume loading risc code */ 340 /* Assume loading risc code */
331 rval = QLA_FUNCTION_FAILED; 341 rval = QLA_FUNCTION_FAILED;
332 342
333 if (ha->flags.disable_risc_code_load) { 343 if (ha->flags.disable_risc_code_load) {
334 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 344 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
335 ha->host_no)); 345 vha->host_no));
336 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n"); 346 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
337 347
338 /* Verify checksum of loaded RISC code. */ 348 /* Verify checksum of loaded RISC code. */
339 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 349 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
340 if (rval == QLA_SUCCESS) { 350 if (rval == QLA_SUCCESS) {
341 /* And, verify we are not in ROM code. */ 351 /* And, verify we are not in ROM code. */
342 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa, 352 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
343 &area, &domain, &topo, &sw_cap); 353 &area, &domain, &topo, &sw_cap);
344 } 354 }
345 } 355 }
346 356
347 if (rval) { 357 if (rval) {
348 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 358 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
349 ha->host_no)); 359 vha->host_no));
350 } 360 }
351 361
352 return (rval); 362 return (rval);
@@ -359,9 +369,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
359 * Returns 0 on success. 369 * Returns 0 on success.
360 */ 370 */
361void 371void
362qla2x00_reset_chip(scsi_qla_host_t *ha) 372qla2x00_reset_chip(scsi_qla_host_t *vha)
363{ 373{
364 unsigned long flags = 0; 374 unsigned long flags = 0;
375 struct qla_hw_data *ha = vha->hw;
365 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 376 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
366 uint32_t cnt; 377 uint32_t cnt;
367 uint16_t cmd; 378 uint16_t cmd;
@@ -499,10 +510,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
499 * Returns 0 on success. 510 * Returns 0 on success.
500 */ 511 */
501static inline void 512static inline void
502qla24xx_reset_risc(scsi_qla_host_t *ha) 513qla24xx_reset_risc(scsi_qla_host_t *vha)
503{ 514{
504 int hw_evt = 0; 515 int hw_evt = 0;
505 unsigned long flags = 0; 516 unsigned long flags = 0;
517 struct qla_hw_data *ha = vha->hw;
506 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 518 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
507 uint32_t cnt, d2; 519 uint32_t cnt, d2;
508 uint16_t wd; 520 uint16_t wd;
@@ -541,7 +553,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
541 barrier(); 553 barrier();
542 } 554 }
543 if (cnt == 0 || hw_evt) 555 if (cnt == 0 || hw_evt)
544 qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR, 556 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
545 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2), 557 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
546 RD_REG_WORD(&reg->mailbox3)); 558 RD_REG_WORD(&reg->mailbox3));
547 559
@@ -571,12 +583,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
571 * Returns 0 on success. 583 * Returns 0 on success.
572 */ 584 */
573void 585void
574qla24xx_reset_chip(scsi_qla_host_t *ha) 586qla24xx_reset_chip(scsi_qla_host_t *vha)
575{ 587{
588 struct qla_hw_data *ha = vha->hw;
576 ha->isp_ops->disable_intrs(ha); 589 ha->isp_ops->disable_intrs(ha);
577 590
578 /* Perform RISC reset. */ 591 /* Perform RISC reset. */
579 qla24xx_reset_risc(ha); 592 qla24xx_reset_risc(vha);
580} 593}
581 594
582/** 595/**
@@ -586,20 +599,22 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
586 * Returns 0 on success. 599 * Returns 0 on success.
587 */ 600 */
588int 601int
589qla2x00_chip_diag(scsi_qla_host_t *ha) 602qla2x00_chip_diag(scsi_qla_host_t *vha)
590{ 603{
591 int rval; 604 int rval;
605 struct qla_hw_data *ha = vha->hw;
592 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 606 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
593 unsigned long flags = 0; 607 unsigned long flags = 0;
594 uint16_t data; 608 uint16_t data;
595 uint32_t cnt; 609 uint32_t cnt;
596 uint16_t mb[5]; 610 uint16_t mb[5];
611 struct req_que *req = ha->req_q_map[0];
597 612
598 /* Assume a failed state */ 613 /* Assume a failed state */
599 rval = QLA_FUNCTION_FAILED; 614 rval = QLA_FUNCTION_FAILED;
600 615
601 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 616 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
602 ha->host_no, (u_long)&reg->flash_address)); 617 vha->host_no, (u_long)&reg->flash_address));
603 618
604 spin_lock_irqsave(&ha->hardware_lock, flags); 619 spin_lock_irqsave(&ha->hardware_lock, flags);
605 620
@@ -662,17 +677,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
662 ha->product_id[3] = mb[4]; 677 ha->product_id[3] = mb[4];
663 678
664 /* Adjust fw RISC transfer size */ 679 /* Adjust fw RISC transfer size */
665 if (ha->request_q_length > 1024) 680 if (req->length > 1024)
666 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 681 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
667 else 682 else
668 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 683 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
669 ha->request_q_length; 684 req->length;
670 685
671 if (IS_QLA2200(ha) && 686 if (IS_QLA2200(ha) &&
672 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 687 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
673 /* Limit firmware transfer size with a 2200A */ 688 /* Limit firmware transfer size with a 2200A */
674 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 689 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
675 ha->host_no)); 690 vha->host_no));
676 691
677 ha->device_type |= DT_ISP2200A; 692 ha->device_type |= DT_ISP2200A;
678 ha->fw_transfer_size = 128; 693 ha->fw_transfer_size = 128;
@@ -681,11 +696,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
681 /* Wrap Incoming Mailboxes Test. */ 696 /* Wrap Incoming Mailboxes Test. */
682 spin_unlock_irqrestore(&ha->hardware_lock, flags); 697 spin_unlock_irqrestore(&ha->hardware_lock, flags);
683 698
684 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no)); 699 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
685 rval = qla2x00_mbx_reg_test(ha); 700 rval = qla2x00_mbx_reg_test(vha);
686 if (rval) { 701 if (rval) {
687 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 702 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
688 ha->host_no)); 703 vha->host_no));
689 qla_printk(KERN_WARNING, ha, 704 qla_printk(KERN_WARNING, ha,
690 "Failed mailbox send register test\n"); 705 "Failed mailbox send register test\n");
691 } 706 }
@@ -698,7 +713,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
698chip_diag_failed: 713chip_diag_failed:
699 if (rval) 714 if (rval)
700 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 715 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
701 "****\n", ha->host_no)); 716 "****\n", vha->host_no));
702 717
703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 718 spin_unlock_irqrestore(&ha->hardware_lock, flags);
704 719
@@ -712,19 +727,21 @@ chip_diag_failed:
712 * Returns 0 on success. 727 * Returns 0 on success.
713 */ 728 */
714int 729int
715qla24xx_chip_diag(scsi_qla_host_t *ha) 730qla24xx_chip_diag(scsi_qla_host_t *vha)
716{ 731{
717 int rval; 732 int rval;
733 struct qla_hw_data *ha = vha->hw;
734 struct req_que *req = ha->req_q_map[0];
718 735
719 /* Perform RISC reset. */ 736 /* Perform RISC reset. */
720 qla24xx_reset_risc(ha); 737 qla24xx_reset_risc(vha);
721 738
722 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length; 739 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
723 740
724 rval = qla2x00_mbx_reg_test(ha); 741 rval = qla2x00_mbx_reg_test(vha);
725 if (rval) { 742 if (rval) {
726 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 743 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
727 ha->host_no)); 744 vha->host_no));
728 qla_printk(KERN_WARNING, ha, 745 qla_printk(KERN_WARNING, ha,
729 "Failed mailbox send register test\n"); 746 "Failed mailbox send register test\n");
730 } else { 747 } else {
@@ -736,13 +753,16 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
736} 753}
737 754
738void 755void
739qla2x00_alloc_fw_dump(scsi_qla_host_t *ha) 756qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
740{ 757{
741 int rval; 758 int rval;
742 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 759 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
743 eft_size, fce_size; 760 eft_size, fce_size, mq_size;
744 dma_addr_t tc_dma; 761 dma_addr_t tc_dma;
745 void *tc; 762 void *tc;
763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = ha->req_q_map[0];
765 struct rsp_que *rsp = ha->rsp_q_map[0];
746 766
747 if (ha->fw_dump) { 767 if (ha->fw_dump) {
748 qla_printk(KERN_WARNING, ha, 768 qla_printk(KERN_WARNING, ha,
@@ -751,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
751 } 771 }
752 772
753 ha->fw_dumped = 0; 773 ha->fw_dumped = 0;
754 fixed_size = mem_size = eft_size = fce_size = 0; 774 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
755 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 775 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
756 fixed_size = sizeof(struct qla2100_fw_dump); 776 fixed_size = sizeof(struct qla2100_fw_dump);
757 } else if (IS_QLA23XX(ha)) { 777 } else if (IS_QLA23XX(ha)) {
@@ -760,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
760 sizeof(uint16_t); 780 sizeof(uint16_t);
761 } else if (IS_FWI2_CAPABLE(ha)) { 781 } else if (IS_FWI2_CAPABLE(ha)) {
762 fixed_size = IS_QLA25XX(ha) ? 782 fixed_size = IS_QLA25XX(ha) ?
763 offsetof(struct qla25xx_fw_dump, ext_mem): 783 offsetof(struct qla25xx_fw_dump, ext_mem) :
764 offsetof(struct qla24xx_fw_dump, ext_mem); 784 offsetof(struct qla24xx_fw_dump, ext_mem);
765 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 785 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
766 sizeof(uint32_t); 786 sizeof(uint32_t);
787 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain);
767 789
768 /* Allocate memory for Fibre Channel Event Buffer. */ 790 /* Allocate memory for Fibre Channel Event Buffer. */
769 if (!IS_QLA25XX(ha)) 791 if (!IS_QLA25XX(ha))
@@ -778,7 +800,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
778 } 800 }
779 801
780 memset(tc, 0, FCE_SIZE); 802 memset(tc, 0, FCE_SIZE);
781 rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 803 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
782 ha->fce_mb, &ha->fce_bufs); 804 ha->fce_mb, &ha->fce_bufs);
783 if (rval) { 805 if (rval) {
784 qla_printk(KERN_WARNING, ha, "Unable to initialize " 806 qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -807,7 +829,7 @@ try_eft:
807 } 829 }
808 830
809 memset(tc, 0, EFT_SIZE); 831 memset(tc, 0, EFT_SIZE);
810 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS); 832 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
811 if (rval) { 833 if (rval) {
812 qla_printk(KERN_WARNING, ha, "Unable to initialize " 834 qla_printk(KERN_WARNING, ha, "Unable to initialize "
813 "EFT (%d).\n", rval); 835 "EFT (%d).\n", rval);
@@ -824,12 +846,12 @@ try_eft:
824 ha->eft = tc; 846 ha->eft = tc;
825 } 847 }
826cont_alloc: 848cont_alloc:
827 req_q_size = ha->request_q_length * sizeof(request_t); 849 req_q_size = req->length * sizeof(request_t);
828 rsp_q_size = ha->response_q_length * sizeof(response_t); 850 rsp_q_size = rsp->length * sizeof(response_t);
829 851
830 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 852 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
831 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
832 eft_size + fce_size; 854 mq_size + eft_size + fce_size;
833 855
834 ha->fw_dump = vmalloc(dump_size); 856 ha->fw_dump = vmalloc(dump_size);
835 if (!ha->fw_dump) { 857 if (!ha->fw_dump) {
@@ -844,7 +866,6 @@ cont_alloc:
844 } 866 }
845 return; 867 return;
846 } 868 }
847
848 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 869 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
849 dump_size / 1024); 870 dump_size / 1024);
850 871
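
The dump buffer is sized as header + chip-specific fixed region + extended memory + both rings + the new mq_size term (only when ha->mqenable) + trace buffers. A stand-alone sketch of the arithmetic with illustrative constants, not the driver's sizes:

#include <stdio.h>
#include <stddef.h>

struct fw_dump_hdr { unsigned signature, version; };   /* stand-in header */

int main(void)
{
	size_t fixed_size = 0x2000;              /* chip-specific register set */
	size_t mem_size   = (0x1fffff - 0x100000 + 1) * sizeof(unsigned);
	size_t req_q_size = 512 * 64;            /* req->length * sizeof(request_t) */
	size_t rsp_q_size = 512 * 64;            /* rsp->length * sizeof(response_t) */
	size_t mq_size    = 64;                  /* qla2xxx_mq_chain, MQ only */
	size_t eft_size   = 0, fce_size = 0;     /* trace buffers, if enabled */

	size_t dump_size = sizeof(struct fw_dump_hdr) + fixed_size + mem_size +
	    req_q_size + rsp_q_size + mq_size + eft_size + fce_size;

	printf("Allocated (%zu KB) for firmware dump...\n", dump_size / 1024);
	return 0;
}
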
@@ -875,27 +896,29 @@ cont_alloc:
875 * Returns 0 on success. 896 * Returns 0 on success.
876 */ 897 */
877static void 898static void
878qla2x00_resize_request_q(scsi_qla_host_t *ha) 899qla2x00_resize_request_q(scsi_qla_host_t *vha)
879{ 900{
880 int rval; 901 int rval;
881 uint16_t fw_iocb_cnt = 0; 902 uint16_t fw_iocb_cnt = 0;
882 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM; 903 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
883 dma_addr_t request_dma; 904 dma_addr_t request_dma;
884 request_t *request_ring; 905 request_t *request_ring;
906 struct qla_hw_data *ha = vha->hw;
907 struct req_que *req = ha->req_q_map[0];
885 908
886 /* Valid only on recent ISPs. */ 909 /* Valid only on recent ISPs. */
887 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 910 if (IS_QLA2100(ha) || IS_QLA2200(ha))
888 return; 911 return;
889 912
890 /* Retrieve IOCB counts available to the firmware. */ 913 /* Retrieve IOCB counts available to the firmware. */
891 rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt, 914 rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
892 &ha->max_npiv_vports); 915 &ha->max_npiv_vports);
893 if (rval) 916 if (rval)
894 return; 917 return;
895 /* No point in continuing if current settings are sufficient. */ 918 /* No point in continuing if current settings are sufficient. */
896 if (fw_iocb_cnt < 1024) 919 if (fw_iocb_cnt < 1024)
897 return; 920 return;
898 if (ha->request_q_length >= request_q_length) 921 if (req->length >= request_q_length)
899 return; 922 return;
900 923
901 /* Attempt to claim larger area for request queue. */ 924 /* Attempt to claim larger area for request queue. */
@@ -909,17 +932,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
909 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n", 932 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
910 (ha->fw_memory_size + 1) / 1024); 933 (ha->fw_memory_size + 1) / 1024);
911 qla_printk(KERN_INFO, ha, "Resizing request queue depth " 934 qla_printk(KERN_INFO, ha, "Resizing request queue depth "
912 "(%d -> %d)...\n", ha->request_q_length, request_q_length); 935 "(%d -> %d)...\n", req->length, request_q_length);
913 936
914 /* Clear old allocations. */ 937 /* Clear old allocations. */
915 dma_free_coherent(&ha->pdev->dev, 938 dma_free_coherent(&ha->pdev->dev,
916 (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring, 939 (req->length + 1) * sizeof(request_t), req->ring,
917 ha->request_dma); 940 req->dma);
918 941
919 /* Begin using larger queue. */ 942 /* Begin using larger queue. */
920 ha->request_q_length = request_q_length; 943 req->length = request_q_length;
921 ha->request_ring = request_ring; 944 req->ring = request_ring;
922 ha->request_dma = request_dma; 945 req->dma = request_dma;
923} 946}
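
The resize itself is unchanged -- allocate a larger ring, free the old one, then point the queue at the new storage -- except the three fields now live on struct req_que. A stand-alone sketch with malloc standing in for dma_alloc_coherent:

#include <stdio.h>
#include <stdlib.h>

struct request { unsigned char bytes[64]; };            /* stand-in IOCB */
struct req_que { unsigned short length; struct request *ring; };

static void resize_request_q(struct req_que *req, unsigned short new_len)
{
	struct request *ring;

	if (req->length >= new_len)     /* current ring already big enough */
		return;
	ring = calloc(new_len + 1, sizeof(*ring));
	if (!ring)
		return;                 /* keep the old, smaller ring */
	free(req->ring);                /* "clear old allocations" */
	req->length = new_len;          /* begin using the larger queue */
	req->ring = ring;
}

int main(void)
{
	struct req_que req = { .length = 128,
			       .ring = calloc(129, sizeof(struct request)) };

	resize_request_q(&req, 4096);
	printf("depth=%u\n", (unsigned)req.length);
	free(req.ring);
	return 0;
}
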
924 947
925/** 948/**
@@ -929,10 +952,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
929 * Returns 0 on success. 952 * Returns 0 on success.
930 */ 953 */
931static int 954static int
932qla2x00_setup_chip(scsi_qla_host_t *ha) 955qla2x00_setup_chip(scsi_qla_host_t *vha)
933{ 956{
934 int rval; 957 int rval;
935 uint32_t srisc_address = 0; 958 uint32_t srisc_address = 0;
959 struct qla_hw_data *ha = vha->hw;
936 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 960 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
937 unsigned long flags; 961 unsigned long flags;
938 962
@@ -945,28 +969,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
945 } 969 }
946 970
947 /* Load firmware sequences */ 971 /* Load firmware sequences */
948 rval = ha->isp_ops->load_risc(ha, &srisc_address); 972 rval = ha->isp_ops->load_risc(vha, &srisc_address);
949 if (rval == QLA_SUCCESS) { 973 if (rval == QLA_SUCCESS) {
950 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 974 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
951 "code.\n", ha->host_no)); 975 "code.\n", vha->host_no));
952 976
953 rval = qla2x00_verify_checksum(ha, srisc_address); 977 rval = qla2x00_verify_checksum(vha, srisc_address);
954 if (rval == QLA_SUCCESS) { 978 if (rval == QLA_SUCCESS) {
955 /* Start firmware execution. */ 979 /* Start firmware execution. */
956 DEBUG(printk("scsi(%ld): Checksum OK, start " 980 DEBUG(printk("scsi(%ld): Checksum OK, start "
957 "firmware.\n", ha->host_no)); 981 "firmware.\n", vha->host_no));
958 982
959 rval = qla2x00_execute_fw(ha, srisc_address); 983 rval = qla2x00_execute_fw(vha, srisc_address);
960 /* Retrieve firmware information. */ 984 /* Retrieve firmware information. */
961 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) { 985 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
962 qla2x00_get_fw_version(ha, 986 qla2x00_get_fw_version(vha,
963 &ha->fw_major_version, 987 &ha->fw_major_version,
964 &ha->fw_minor_version, 988 &ha->fw_minor_version,
965 &ha->fw_subminor_version, 989 &ha->fw_subminor_version,
966 &ha->fw_attributes, &ha->fw_memory_size); 990 &ha->fw_attributes, &ha->fw_memory_size);
967 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
969 IS_QLA84XX(ha)) &&
970 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
971 ha->flags.npiv_supported = 1; 994 ha->flags.npiv_supported = 1;
972 if ((!ha->max_npiv_vports) || 995 if ((!ha->max_npiv_vports) ||
@@ -975,15 +998,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
975 ha->max_npiv_vports = 998 ha->max_npiv_vports =
976 MIN_MULTI_ID_FABRIC - 1; 999 MIN_MULTI_ID_FABRIC - 1;
977 } 1000 }
978 qla2x00_resize_request_q(ha); 1001 qla2x00_resize_request_q(vha);
979 1002
980 if (ql2xallocfwdump) 1003 if (ql2xallocfwdump)
981 qla2x00_alloc_fw_dump(ha); 1004 qla2x00_alloc_fw_dump(vha);
982 } 1005 }
983 } else { 1006 } else {
984 DEBUG2(printk(KERN_INFO 1007 DEBUG2(printk(KERN_INFO
985 "scsi(%ld): ISP Firmware failed checksum.\n", 1008 "scsi(%ld): ISP Firmware failed checksum.\n",
986 ha->host_no)); 1009 vha->host_no));
987 } 1010 }
988 } 1011 }
989 1012
@@ -1002,7 +1025,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1002 1025
1003 if (rval) { 1026 if (rval) {
1004 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1027 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1005 ha->host_no)); 1028 vha->host_no));
1006 } 1029 }
1007 1030
1008 return (rval); 1031 return (rval);
@@ -1017,14 +1040,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1017 * 1040 *
1018 * Returns 0 on success. 1041 * Returns 0 on success.
1019 */ 1042 */
1020static void 1043void
1021qla2x00_init_response_q_entries(scsi_qla_host_t *ha) 1044qla2x00_init_response_q_entries(struct rsp_que *rsp)
1022{ 1045{
1023 uint16_t cnt; 1046 uint16_t cnt;
1024 response_t *pkt; 1047 response_t *pkt;
1025 1048
1026 pkt = ha->response_ring_ptr; 1049 pkt = rsp->ring_ptr;
1027 for (cnt = 0; cnt < ha->response_q_length; cnt++) { 1050 for (cnt = 0; cnt < rsp->length; cnt++) {
1028 pkt->signature = RESPONSE_PROCESSED; 1051 pkt->signature = RESPONSE_PROCESSED;
1029 pkt++; 1052 pkt++;
1030 } 1053 }
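
Taking struct rsp_que directly (and un-static-ing the function) lets each response queue be stamped on its own; every slot starts as RESPONSE_PROCESSED so the interrupt handler can tell freshly DMA-written entries from already-consumed ones. A stand-alone sketch, with stand-in types and the signature value assumed:

#include <stdio.h>

#define RESPONSE_PROCESSED 0xDEADDEAD

typedef struct response { unsigned int signature; } response_t;
struct rsp_que { unsigned short length; response_t *ring, *ring_ptr; };

static void init_response_q_entries(struct rsp_que *rsp)
{
	response_t *pkt = rsp->ring_ptr;
	unsigned short cnt;

	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;    /* mark slot consumed */
		pkt++;
	}
}

int main(void)
{
	response_t ring[4] = {{ 0 }};
	struct rsp_que rsp = { .length = 4, .ring = ring, .ring_ptr = ring };

	init_response_q_entries(&rsp);      /* one call per response queue */
	printf("sig=%x\n", ring[3].signature);
	return 0;
}
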
@@ -1038,19 +1061,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
1038 * Returns 0 on success. 1061 * Returns 0 on success.
1039 */ 1062 */
1040void 1063void
1041qla2x00_update_fw_options(scsi_qla_host_t *ha) 1064qla2x00_update_fw_options(scsi_qla_host_t *vha)
1042{ 1065{
1043 uint16_t swing, emphasis, tx_sens, rx_sens; 1066 uint16_t swing, emphasis, tx_sens, rx_sens;
1067 struct qla_hw_data *ha = vha->hw;
1044 1068
1045 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 1069 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1046 qla2x00_get_fw_options(ha, ha->fw_options); 1070 qla2x00_get_fw_options(vha, ha->fw_options);
1047 1071
1048 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1072 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1049 return; 1073 return;
1050 1074
1051 /* Serial Link options. */ 1075 /* Serial Link options. */
1052 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1076 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1053 ha->host_no)); 1077 vha->host_no));
1054 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1078 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1055 sizeof(ha->fw_seriallink_options))); 1079 sizeof(ha->fw_seriallink_options)));
1056 1080
@@ -1108,19 +1132,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
1108 ha->fw_options[2] |= BIT_13; 1132 ha->fw_options[2] |= BIT_13;
1109 1133
1110 /* Update firmware options. */ 1134 /* Update firmware options. */
1111 qla2x00_set_fw_options(ha, ha->fw_options); 1135 qla2x00_set_fw_options(vha, ha->fw_options);
1112} 1136}
1113 1137
1114void 1138void
1115qla24xx_update_fw_options(scsi_qla_host_t *ha) 1139qla24xx_update_fw_options(scsi_qla_host_t *vha)
1116{ 1140{
1117 int rval; 1141 int rval;
1142 struct qla_hw_data *ha = vha->hw;
1118 1143
1119 /* Update Serial Link options. */ 1144 /* Update Serial Link options. */
1120 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 1145 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1121 return; 1146 return;
1122 1147
1123 rval = qla2x00_set_serdes_params(ha, 1148 rval = qla2x00_set_serdes_params(vha,
1124 le16_to_cpu(ha->fw_seriallink_options24[1]), 1149 le16_to_cpu(ha->fw_seriallink_options24[1]),
1125 le16_to_cpu(ha->fw_seriallink_options24[2]), 1150 le16_to_cpu(ha->fw_seriallink_options24[2]),
1126 le16_to_cpu(ha->fw_seriallink_options24[3])); 1151 le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1131,19 +1156,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
1131} 1156}
1132 1157
1133void 1158void
1134qla2x00_config_rings(struct scsi_qla_host *ha) 1159qla2x00_config_rings(struct scsi_qla_host *vha)
1135{ 1160{
1161 struct qla_hw_data *ha = vha->hw;
1136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1162 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1163 struct req_que *req = ha->req_q_map[0];
1164 struct rsp_que *rsp = ha->rsp_q_map[0];
1137 1165
1138 /* Setup ring parameters in initialization control block. */ 1166 /* Setup ring parameters in initialization control block. */
1139 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 1167 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1140 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 1168 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1141 ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length); 1169 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1142 ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length); 1170 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1143 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1171 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1144 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1172 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1145 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1173 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1146 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1174 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1147 1175
1148 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 1176 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1149 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 1177 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1153,27 +1181,62 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
1153} 1181}
1154 1182
1155void 1183void
1156qla24xx_config_rings(struct scsi_qla_host *ha) 1184qla24xx_config_rings(struct scsi_qla_host *vha)
1157{ 1185{
1158 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1186 struct qla_hw_data *ha = vha->hw;
1187 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1188 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1189 struct qla_msix_entry *msix;
1159 struct init_cb_24xx *icb; 1190 struct init_cb_24xx *icb;
1191 uint16_t rid = 0;
1192 struct req_que *req = ha->req_q_map[0];
1193 struct rsp_que *rsp = ha->rsp_q_map[0];
1160 1194
1161 	/* Setup ring parameters in initialization control block. */ 1195 	/* Setup ring parameters in initialization control block. */
1162 icb = (struct init_cb_24xx *)ha->init_cb; 1196 icb = (struct init_cb_24xx *)ha->init_cb;
1163 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1197 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1164 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1198 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1165 icb->request_q_length = cpu_to_le16(ha->request_q_length); 1199 icb->request_q_length = cpu_to_le16(req->length);
1166 icb->response_q_length = cpu_to_le16(ha->response_q_length); 1200 icb->response_q_length = cpu_to_le16(rsp->length);
1167 icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1201 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1168 icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1202 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1169 icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1203 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1170 icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1204 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1171 1205
1172 WRT_REG_DWORD(&reg->req_q_in, 0); 1206 if (ha->mqenable) {
1173 WRT_REG_DWORD(&reg->req_q_out, 0); 1207 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1174 WRT_REG_DWORD(&reg->rsp_q_in, 0); 1208 icb->rid = __constant_cpu_to_le16(rid);
1175 WRT_REG_DWORD(&reg->rsp_q_out, 0); 1209 if (ha->flags.msix_enabled) {
1176 RD_REG_DWORD(&reg->rsp_q_out); 1210 msix = &ha->msix_entries[1];
1211 DEBUG2_17(printk(KERN_INFO
 1212 			    "Registering vector 0x%x for base queue\n", msix->entry));
1213 icb->msix = cpu_to_le16(msix->entry);
1214 }
1215 /* Use alternate PCI bus number */
1216 if (MSB(rid))
1217 icb->firmware_options_2 |=
1218 __constant_cpu_to_le32(BIT_19);
1219 /* Use alternate PCI devfn */
1220 if (LSB(rid))
1221 icb->firmware_options_2 |=
1222 __constant_cpu_to_le32(BIT_18);
1223
1224 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
1225 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1226 ha->rsp_q_map[0]->options = icb->firmware_options_2;
1227
1228 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1229 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1230 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1231 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1232 } else {
1233 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1234 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1235 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1236 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1237 }
1238 /* PCI posting */
1239 RD_REG_DWORD(&ioreg->hccr);
1177} 1240}
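
In the new MQ branch, firmware_options_2 is built from the queue rid and two feature bits before the per-queue doorbells are zeroed through the isp25mq register block. A stand-alone sketch of the bit composition (BIT_n values match the usual 1<<n macros; rid 0 is the base queue):

#include <stdio.h>

#define BIT_18 (1u << 18)
#define BIT_19 (1u << 19)
#define BIT_22 (1u << 22)
#define BIT_23 (1u << 23)
#define MSB(x) (((x) >> 8) & 0xff)
#define LSB(x) ((x) & 0xff)

int main(void)
{
	unsigned int rid = 0;           /* base queue: bus 0, devfn 0 */
	unsigned int opts2 = 0;

	if (MSB(rid))
		opts2 |= BIT_19;        /* use alternate PCI bus number */
	if (LSB(rid))
		opts2 |= BIT_18;        /* use alternate PCI devfn */
	opts2 |= BIT_22 | BIT_23;       /* MQ register layout */

	printf("firmware_options_2=0x%08x\n", opts2);   /* 0x00c00000 for rid 0 */
	return 0;
}
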
1178 1241
1179/** 1242/**
@@ -1186,11 +1249,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
1186 * Returns 0 on success. 1249 * Returns 0 on success.
1187 */ 1250 */
1188static int 1251static int
1189qla2x00_init_rings(scsi_qla_host_t *ha) 1252qla2x00_init_rings(scsi_qla_host_t *vha)
1190{ 1253{
1191 int rval; 1254 int rval;
1192 unsigned long flags = 0; 1255 unsigned long flags = 0;
1193 int cnt; 1256 int cnt;
1257 struct qla_hw_data *ha = vha->hw;
1258 struct req_que *req = ha->req_q_map[0];
1259 struct rsp_que *rsp = ha->rsp_q_map[0];
1194 struct mid_init_cb_24xx *mid_init_cb = 1260 struct mid_init_cb_24xx *mid_init_cb =
1195 (struct mid_init_cb_24xx *) ha->init_cb; 1261 (struct mid_init_cb_24xx *) ha->init_cb;
1196 1262
@@ -1198,45 +1264,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1198 1264
1199 /* Clear outstanding commands array. */ 1265 /* Clear outstanding commands array. */
1200 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1266 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1201 ha->outstanding_cmds[cnt] = NULL; 1267 req->outstanding_cmds[cnt] = NULL;
1202 1268
1203 ha->current_outstanding_cmd = 0; 1269 req->current_outstanding_cmd = 0;
1204 1270
1205 /* Clear RSCN queue. */ 1271 /* Clear RSCN queue. */
1206 ha->rscn_in_ptr = 0; 1272 vha->rscn_in_ptr = 0;
1207 ha->rscn_out_ptr = 0; 1273 vha->rscn_out_ptr = 0;
1208 1274
1209 /* Initialize firmware. */ 1275 /* Initialize firmware. */
1210 ha->request_ring_ptr = ha->request_ring; 1276 req->ring_ptr = req->ring;
1211 ha->req_ring_index = 0; 1277 req->ring_index = 0;
1212 ha->req_q_cnt = ha->request_q_length; 1278 req->cnt = req->length;
1213 ha->response_ring_ptr = ha->response_ring; 1279 rsp->ring_ptr = rsp->ring;
1214 ha->rsp_ring_index = 0; 1280 rsp->ring_index = 0;
1215 1281
1216 /* Initialize response queue entries */ 1282 /* Initialize response queue entries */
1217 qla2x00_init_response_q_entries(ha); 1283 qla2x00_init_response_q_entries(rsp);
1218 1284
1219 ha->isp_ops->config_rings(ha); 1285 ha->isp_ops->config_rings(vha);
1220 1286
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 1288
1223 /* Update any ISP specific firmware options before initialization. */ 1289 /* Update any ISP specific firmware options before initialization. */
1224 ha->isp_ops->update_fw_options(ha); 1290 ha->isp_ops->update_fw_options(vha);
1225 1291
1226 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1292 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1227 1293
1228 if (ha->flags.npiv_supported) 1294 if (ha->flags.npiv_supported)
1229 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1295 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1230 1296
1231 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 1297 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1232 1298
1233 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1299 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1234 if (rval) { 1300 if (rval) {
1235 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1301 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1236 ha->host_no)); 1302 vha->host_no));
1237 } else { 1303 } else {
1238 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1304 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1239 ha->host_no)); 1305 vha->host_no));
1240 } 1306 }
1241 1307
1242 return (rval); 1308 return (rval);
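
With outstanding_cmds and current_outstanding_cmd moved onto struct req_que, each request queue tracks its own in-flight handles. A stand-alone sketch of how a free handle would be found, in the style of the I/O path (sizes and types are stand-ins):

#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 32

struct srb;                                     /* opaque command handle */
struct req_que {
	struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	unsigned short current_outstanding_cmd;
};

/* scan forward from the last handle, wrapping once around the table */
static int assign_handle(struct req_que *req, struct srb *sp)
{
	unsigned short h = req->current_outstanding_cmd;
	int i;

	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		h = (h + 1) % MAX_OUTSTANDING_COMMANDS;
		if (!req->outstanding_cmds[h]) {
			req->outstanding_cmds[h] = sp;
			req->current_outstanding_cmd = h;
			return h;
		}
	}
	return -1;                              /* queue full */
}

int main(void)
{
	struct req_que req = { .current_outstanding_cmd = 0 };

	printf("handle=%d\n", assign_handle(&req, (struct srb *)1));
	return 0;
}
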
@@ -1249,13 +1315,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1249 * Returns 0 on success. 1315 * Returns 0 on success.
1250 */ 1316 */
1251static int 1317static int
1252qla2x00_fw_ready(scsi_qla_host_t *ha) 1318qla2x00_fw_ready(scsi_qla_host_t *vha)
1253{ 1319{
1254 int rval; 1320 int rval;
1255 unsigned long wtime, mtime, cs84xx_time; 1321 unsigned long wtime, mtime, cs84xx_time;
1256 uint16_t min_wait; /* Minimum wait time if loop is down */ 1322 uint16_t min_wait; /* Minimum wait time if loop is down */
1257 uint16_t wait_time; /* Wait time if loop is coming ready */ 1323 uint16_t wait_time; /* Wait time if loop is coming ready */
1258 uint16_t state[3]; 1324 uint16_t state[3];
1325 struct qla_hw_data *ha = vha->hw;
1259 1326
1260 rval = QLA_SUCCESS; 1327 rval = QLA_SUCCESS;
1261 1328
@@ -1277,29 +1344,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1277 wtime = jiffies + (wait_time * HZ); 1344 wtime = jiffies + (wait_time * HZ);
1278 1345
1279 /* Wait for ISP to finish LIP */ 1346 /* Wait for ISP to finish LIP */
1280 if (!ha->flags.init_done) 1347 if (!vha->flags.init_done)
1281 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1348 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1282 1349
1283 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", 1350 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1284 ha->host_no)); 1351 vha->host_no));
1285 1352
1286 do { 1353 do {
1287 rval = qla2x00_get_firmware_state(ha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1288 if (rval == QLA_SUCCESS) { 1355 if (rval == QLA_SUCCESS) {
1289 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1356 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1290 ha->device_flags &= ~DFLG_NO_CABLE; 1357 vha->device_flags &= ~DFLG_NO_CABLE;
1291 } 1358 }
1292 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1359 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1293 DEBUG16(printk("scsi(%ld): fw_state=%x " 1360 DEBUG16(printk("scsi(%ld): fw_state=%x "
1294 "84xx=%x.\n", ha->host_no, state[0], 1361 "84xx=%x.\n", vha->host_no, state[0],
1295 state[2])); 1362 state[2]));
1296 if ((state[2] & FSTATE_LOGGED_IN) && 1363 if ((state[2] & FSTATE_LOGGED_IN) &&
1297 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1364 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1298 DEBUG16(printk("scsi(%ld): Sending " 1365 DEBUG16(printk("scsi(%ld): Sending "
1299 "verify iocb.\n", ha->host_no)); 1366 "verify iocb.\n", vha->host_no));
1300 1367
1301 cs84xx_time = jiffies; 1368 cs84xx_time = jiffies;
1302 rval = qla84xx_init_chip(ha); 1369 rval = qla84xx_init_chip(vha);
1303 if (rval != QLA_SUCCESS) 1370 if (rval != QLA_SUCCESS)
1304 break; 1371 break;
1305 1372
@@ -1309,13 +1376,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1309 mtime += cs84xx_time; 1376 mtime += cs84xx_time;
1310 DEBUG16(printk("scsi(%ld): Increasing " 1377 DEBUG16(printk("scsi(%ld): Increasing "
1311 "wait time by %ld. New time %ld\n", 1378 "wait time by %ld. New time %ld\n",
1312 ha->host_no, cs84xx_time, wtime)); 1379 vha->host_no, cs84xx_time, wtime));
1313 } 1380 }
1314 } else if (state[0] == FSTATE_READY) { 1381 } else if (state[0] == FSTATE_READY) {
1315 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1382 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1316 ha->host_no)); 1383 vha->host_no));
1317 1384
1318 qla2x00_get_retry_cnt(ha, &ha->retry_count, 1385 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1319 &ha->login_timeout, &ha->r_a_tov); 1386 &ha->login_timeout, &ha->r_a_tov);
1320 1387
1321 rval = QLA_SUCCESS; 1388 rval = QLA_SUCCESS;
@@ -1324,7 +1391,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1324 1391
1325 rval = QLA_FUNCTION_FAILED; 1392 rval = QLA_FUNCTION_FAILED;
1326 1393
1327 if (atomic_read(&ha->loop_down_timer) && 1394 if (atomic_read(&vha->loop_down_timer) &&
1328 state[0] != FSTATE_READY) { 1395 state[0] != FSTATE_READY) {
1329 /* Loop down. Timeout on min_wait for states 1396 /* Loop down. Timeout on min_wait for states
1330 * other than Wait for Login. 1397 * other than Wait for Login.
@@ -1333,7 +1400,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1333 qla_printk(KERN_INFO, ha, 1400 qla_printk(KERN_INFO, ha,
1334 "Cable is unplugged...\n"); 1401 "Cable is unplugged...\n");
1335 1402
1336 ha->device_flags |= DFLG_NO_CABLE; 1403 vha->device_flags |= DFLG_NO_CABLE;
1337 break; 1404 break;
1338 } 1405 }
1339 } 1406 }
@@ -1350,15 +1417,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1350 msleep(500); 1417 msleep(500);
1351 1418
1352 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1419 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1353 ha->host_no, state[0], jiffies)); 1420 vha->host_no, state[0], jiffies));
1354 } while (1); 1421 } while (1);
1355 1422
1356 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1423 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1357 ha->host_no, state[0], jiffies)); 1424 vha->host_no, state[0], jiffies));
1358 1425
1359 if (rval) { 1426 if (rval) {
1360 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1427 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1361 ha->host_no)); 1428 vha->host_no));
1362 } 1429 }
1363 1430
1364 return (rval); 1431 return (rval);
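qla2x00_fw_ready() polls firmware state against a deadline: wtime = jiffies + (wait_time * HZ) sets the budget, each pass sleeps 500 ms, and the 84xx verify-IOCB path grows mtime/wtime by the time the verify itself consumed. A hedged userspace model of the same deadline-and-poll shape, using a monotonic clock in place of jiffies and a stub in place of qla2x00_get_firmware_state():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static double now_s(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* Stand-in for the firmware-state query; ready on the 4th call. */
static bool fw_ready_sketch(void)
{
        static int calls;

        return ++calls >= 4;
}

int main(void)
{
        double wtime = now_s() + 10.0;  /* overall deadline, like wtime */
        int rval = -1;

        do {
                if (fw_ready_sketch()) {
                        rval = 0;       /* FSTATE_READY analogue */
                        break;
                }
                if (now_s() >= wtime)
                        break;          /* budget exhausted */
                usleep(500 * 1000);     /* msleep(500) analogue */
        } while (1);

        printf("firmware %s\n", rval == 0 ? "ready" : "timed out");
        return rval;
}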
@@ -1378,7 +1445,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1378* Kernel context. 1445* Kernel context.
1379*/ 1446*/
1380static int 1447static int
1381qla2x00_configure_hba(scsi_qla_host_t *ha) 1448qla2x00_configure_hba(scsi_qla_host_t *vha)
1382{ 1449{
1383 int rval; 1450 int rval;
1384 uint16_t loop_id; 1451 uint16_t loop_id;
@@ -1388,19 +1455,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1388 uint8_t area; 1455 uint8_t area;
1389 uint8_t domain; 1456 uint8_t domain;
1390 char connect_type[22]; 1457 char connect_type[22];
1458 struct qla_hw_data *ha = vha->hw;
1391 1459
1392 /* Get host addresses. */ 1460 /* Get host addresses. */
1393 rval = qla2x00_get_adapter_id(ha, 1461 rval = qla2x00_get_adapter_id(vha,
1394 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1462 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1395 if (rval != QLA_SUCCESS) { 1463 if (rval != QLA_SUCCESS) {
1396 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) || 1464 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1397 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1465 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1398 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 1466 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1399 __func__, ha->host_no)); 1467 __func__, vha->host_no));
1400 } else { 1468 } else {
1401 qla_printk(KERN_WARNING, ha, 1469 qla_printk(KERN_WARNING, ha,
1402 "ERROR -- Unable to get host loop ID.\n"); 1470 "ERROR -- Unable to get host loop ID.\n");
1403 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1404 } 1472 }
1405 return (rval); 1473 return (rval);
1406 } 1474 }
@@ -1411,7 +1479,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1411 return (QLA_FUNCTION_FAILED); 1479 return (QLA_FUNCTION_FAILED);
1412 } 1480 }
1413 1481
1414 ha->loop_id = loop_id; 1482 vha->loop_id = loop_id;
1415 1483
1416 /* initialize */ 1484 /* initialize */
1417 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1485 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1421,14 +1489,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1421 switch (topo) { 1489 switch (topo) {
1422 case 0: 1490 case 0:
1423 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 1491 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1424 ha->host_no)); 1492 vha->host_no));
1425 ha->current_topology = ISP_CFG_NL; 1493 ha->current_topology = ISP_CFG_NL;
1426 strcpy(connect_type, "(Loop)"); 1494 strcpy(connect_type, "(Loop)");
1427 break; 1495 break;
1428 1496
1429 case 1: 1497 case 1:
1430 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1498 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1431 ha->host_no)); 1499 vha->host_no));
1432 ha->switch_cap = sw_cap; 1500 ha->switch_cap = sw_cap;
1433 ha->current_topology = ISP_CFG_FL; 1501 ha->current_topology = ISP_CFG_FL;
1434 strcpy(connect_type, "(FL_Port)"); 1502 strcpy(connect_type, "(FL_Port)");
@@ -1436,7 +1504,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1436 1504
1437 case 2: 1505 case 2:
1438 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 1506 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1439 ha->host_no)); 1507 vha->host_no));
1440 ha->operating_mode = P2P; 1508 ha->operating_mode = P2P;
1441 ha->current_topology = ISP_CFG_N; 1509 ha->current_topology = ISP_CFG_N;
1442 strcpy(connect_type, "(N_Port-to-N_Port)"); 1510 strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1444,7 +1512,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1444 1512
1445 case 3: 1513 case 3:
1446 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1514 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1447 ha->host_no)); 1515 vha->host_no));
1448 ha->switch_cap = sw_cap; 1516 ha->switch_cap = sw_cap;
1449 ha->operating_mode = P2P; 1517 ha->operating_mode = P2P;
1450 ha->current_topology = ISP_CFG_F; 1518 ha->current_topology = ISP_CFG_F;
@@ -1454,7 +1522,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1454 default: 1522 default:
1455 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 1523 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1456 "Using NL.\n", 1524 "Using NL.\n",
1457 ha->host_no, topo)); 1525 vha->host_no, topo));
1458 ha->current_topology = ISP_CFG_NL; 1526 ha->current_topology = ISP_CFG_NL;
1459 strcpy(connect_type, "(Loop)"); 1527 strcpy(connect_type, "(Loop)");
1460 break; 1528 break;
@@ -1462,29 +1530,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1462 1530
1463 /* Save Host port and loop ID. */ 1531 /* Save Host port and loop ID. */
1464 /* byte order - Big Endian */ 1532 /* byte order - Big Endian */
1465 ha->d_id.b.domain = domain; 1533 vha->d_id.b.domain = domain;
1466 ha->d_id.b.area = area; 1534 vha->d_id.b.area = area;
1467 ha->d_id.b.al_pa = al_pa; 1535 vha->d_id.b.al_pa = al_pa;
1468 1536
1469 if (!ha->flags.init_done) 1537 if (!vha->flags.init_done)
1470 qla_printk(KERN_INFO, ha, 1538 qla_printk(KERN_INFO, ha,
1471 "Topology - %s, Host Loop address 0x%x\n", 1539 "Topology - %s, Host Loop address 0x%x\n",
1472 connect_type, ha->loop_id); 1540 connect_type, vha->loop_id);
1473 1541
1474 if (rval) { 1542 if (rval) {
1475 DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no)); 1543 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1476 } else { 1544 } else {
1477 DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no)); 1545 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
1478 } 1546 }
1479 1547
1480 return(rval); 1548 return(rval);
1481} 1549}
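qla2x00_configure_hba() saves the adapter's 24-bit fabric address as three bytes on vha->d_id (domain/area/al_pa), and qla2x00_reg_remote_port() further down rebuilds the wire format with domain << 16 | area << 8 | al_pa. A small sketch of that packing, computing the 24-bit view explicitly; the driver's actual port_id_t layout (a union with a b24 member) is assumed, not reproduced:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for port_id_t: three address bytes. */
struct port_id_sketch {
        uint8_t domain, area, al_pa;
};

static uint32_t port_id_b24(struct port_id_sketch id)
{
        return (uint32_t)id.domain << 16 |
               (uint32_t)id.area   << 8  |
               id.al_pa;
}

int main(void)
{
        struct port_id_sketch id = { .domain = 0x01, .area = 0x02,
                                     .al_pa = 0xef };

        printf("port_id = %06x\n", port_id_b24(id));    /* 0102ef */
        return 0;
}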
1482 1550
1483static inline void 1551static inline void
1484qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def) 1552qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 char *def)
1485{ 1554{
1486 char *st, *en; 1555 char *st, *en;
1487 uint16_t index; 1556 uint16_t index;
1557 struct qla_hw_data *ha = vha->hw;
1488 1558
1489 if (memcmp(model, BINZERO, len) != 0) { 1559 if (memcmp(model, BINZERO, len) != 0) {
1490 strncpy(ha->model_number, model, len); 1560 strncpy(ha->model_number, model, len);
@@ -1516,16 +1586,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1516 } 1586 }
1517 } 1587 }
1518 if (IS_FWI2_CAPABLE(ha)) 1588 if (IS_FWI2_CAPABLE(ha))
1519 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc, 1589 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1520 sizeof(ha->model_desc)); 1590 sizeof(ha->model_desc));
1521} 1591}
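qla2x00_set_model_info() copies a fixed-width model field out of NVRAM only when it is not all zero bytes (the memcmp against BINZERO), otherwise it falls back to a default string; the st/en locals suggest the copied field is also trimmed. A hedged sketch of handling such a field, which may be space-padded and need not be NUL-terminated; BINZERO is modeled here as a zeroed buffer:

#include <stdio.h>
#include <string.h>

#define MODEL_LEN 16

/* Copy a fixed-width, possibly unterminated NVRAM field into a C
 * string, defaulting when the field is all zero bytes, then trim
 * the trailing padding left by the fixed width. */
static void set_model_sketch(char out[MODEL_LEN + 1],
                             const unsigned char field[MODEL_LEN],
                             const char *def)
{
        static const unsigned char binzero[MODEL_LEN];  /* all 0x00 */

        if (memcmp(field, binzero, MODEL_LEN) != 0) {
                memcpy(out, field, MODEL_LEN);
                out[MODEL_LEN] = '\0';
                for (int i = MODEL_LEN - 1; i >= 0 && out[i] == ' '; i--)
                        out[i] = '\0';
        } else {
                snprintf(out, MODEL_LEN + 1, "%s", def);
        }
}

int main(void)
{
        unsigned char field[MODEL_LEN];
        char model[MODEL_LEN + 1];

        memset(field, ' ', sizeof(field));
        memcpy(field, "QLA2300", 7);    /* space-padded, no NUL */
        set_model_sketch(model, field, "QLA23xx");
        printf("model: '%s'\n", model);
        return 0;
}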
1522 1592
1523/* On sparc systems, obtain port and node WWN from firmware 1593/* On sparc systems, obtain port and node WWN from firmware
1524 * properties. 1594 * properties.
1525 */ 1595 */
1526static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv) 1596static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1527{ 1597{
1528#ifdef CONFIG_SPARC 1598#ifdef CONFIG_SPARC
1599 struct qla_hw_data *ha = vha->hw;
1529 struct pci_dev *pdev = ha->pdev; 1600 struct pci_dev *pdev = ha->pdev;
1530 struct device_node *dp = pci_device_to_OF_node(pdev); 1601 struct device_node *dp = pci_device_to_OF_node(pdev);
1531 const u8 *val; 1602 const u8 *val;
@@ -1555,12 +1626,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1555* 0 = success. 1626* 0 = success.
1556*/ 1627*/
1557int 1628int
1558qla2x00_nvram_config(scsi_qla_host_t *ha) 1629qla2x00_nvram_config(scsi_qla_host_t *vha)
1559{ 1630{
1560 int rval; 1631 int rval;
1561 uint8_t chksum = 0; 1632 uint8_t chksum = 0;
1562 uint16_t cnt; 1633 uint16_t cnt;
1563 uint8_t *dptr1, *dptr2; 1634 uint8_t *dptr1, *dptr2;
1635 struct qla_hw_data *ha = vha->hw;
1564 init_cb_t *icb = ha->init_cb; 1636 init_cb_t *icb = ha->init_cb;
1565 nvram_t *nv = ha->nvram; 1637 nvram_t *nv = ha->nvram;
1566 uint8_t *ptr = ha->nvram; 1638 uint8_t *ptr = ha->nvram;
@@ -1576,11 +1648,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1576 ha->nvram_base = 0x80; 1648 ha->nvram_base = 0x80;
1577 1649
1578 /* Get NVRAM data and calculate checksum. */ 1650 /* Get NVRAM data and calculate checksum. */
1579 ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size); 1651 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1580 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1652 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1581 chksum += *ptr++; 1653 chksum += *ptr++;
1582 1654
1583 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 1655 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1584 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 1656 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1585 1657
1586 /* Bad NVRAM data, set defaults parameters. */ 1658 /* Bad NVRAM data, set defaults parameters. */
@@ -1594,7 +1666,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1594 "invalid -- WWPN) defaults.\n"); 1666 "invalid -- WWPN) defaults.\n");
1595 1667
1596 if (chksum) 1668 if (chksum)
1597 qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0, 1669 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1598 MSW(chksum), LSW(chksum)); 1670 MSW(chksum), LSW(chksum));
1599 1671
1600 /* 1672 /*
@@ -1631,7 +1703,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1631 nv->port_name[3] = 224; 1703 nv->port_name[3] = 224;
1632 nv->port_name[4] = 139; 1704 nv->port_name[4] = 139;
1633 1705
1634 qla2xxx_nvram_wwn_from_ofw(ha, nv); 1706 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1635 1707
1636 nv->login_timeout = 4; 1708 nv->login_timeout = 4;
1637 1709
@@ -1684,7 +1756,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1684 strcpy(ha->model_number, "QLA2300"); 1756 strcpy(ha->model_number, "QLA2300");
1685 } 1757 }
1686 } else { 1758 } else {
1687 qla2x00_set_model_info(ha, nv->model_number, 1759 qla2x00_set_model_info(vha, nv->model_number,
1688 sizeof(nv->model_number), "QLA23xx"); 1760 sizeof(nv->model_number), "QLA23xx");
1689 } 1761 }
1690 } else if (IS_QLA2200(ha)) { 1762 } else if (IS_QLA2200(ha)) {
@@ -1760,8 +1832,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1760 ha->serial0 = icb->port_name[5]; 1832 ha->serial0 = icb->port_name[5];
1761 ha->serial1 = icb->port_name[6]; 1833 ha->serial1 = icb->port_name[6];
1762 ha->serial2 = icb->port_name[7]; 1834 ha->serial2 = icb->port_name[7];
1763 ha->node_name = icb->node_name; 1835 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1764 ha->port_name = icb->port_name; 1836 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1765 1837
1766 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 1838 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1767 1839
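One behavioral fix hides in the hunk above: the old code pointed ha->node_name/ha->port_name at bytes inside the init control block, so the names aliased ICB storage and would follow any later rewrite of it; the new code copies WWN_SIZE (8) bytes into per-port arrays on vha. A toy illustration of why the copy is the safer choice when the source buffer is reused:

#include <stdio.h>
#include <string.h>

#define WWN_SIZE 8

int main(void)
{
        unsigned char icb_port_name[WWN_SIZE] =
            { 0x21, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 };

        /* Old style: alias into the ICB; changes leak through. */
        unsigned char *alias = icb_port_name;

        /* New style: private per-port copy, as in the patch. */
        unsigned char copy[WWN_SIZE];

        memcpy(copy, icb_port_name, WWN_SIZE);

        icb_port_name[0] = 0xff;        /* later ICB rewrite */

        printf("alias sees %02x, copy still %02x\n", alias[0], copy[0]);
        return 0;
}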
@@ -1829,10 +1901,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1829 icb->response_accumulation_timer = 3; 1901 icb->response_accumulation_timer = 3;
1830 icb->interrupt_delay_timer = 5; 1902 icb->interrupt_delay_timer = 5;
1831 1903
1832 ha->flags.process_response_queue = 1; 1904 vha->flags.process_response_queue = 1;
1833 } else { 1905 } else {
1834 /* Enable ZIO. */ 1906 /* Enable ZIO. */
1835 if (!ha->flags.init_done) { 1907 if (!vha->flags.init_done) {
1836 ha->zio_mode = icb->add_firmware_options[0] & 1908 ha->zio_mode = icb->add_firmware_options[0] &
1837 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1909 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1838 ha->zio_timer = icb->interrupt_delay_timer ? 1910 ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1840,12 +1912,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1840 } 1912 }
1841 icb->add_firmware_options[0] &= 1913 icb->add_firmware_options[0] &=
1842 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 1914 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1843 ha->flags.process_response_queue = 0; 1915 vha->flags.process_response_queue = 0;
1844 if (ha->zio_mode != QLA_ZIO_DISABLED) { 1916 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1845 ha->zio_mode = QLA_ZIO_MODE_6; 1917 ha->zio_mode = QLA_ZIO_MODE_6;
1846 1918
1847 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 1919 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1848 "delay (%d us).\n", ha->host_no, ha->zio_mode, 1920 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1849 ha->zio_timer * 100)); 1921 ha->zio_timer * 100));
1850 qla_printk(KERN_INFO, ha, 1922 qla_printk(KERN_INFO, ha,
1851 "ZIO mode %d enabled; timer delay (%d us).\n", 1923 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1853,13 +1925,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1853 1925
1854 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 1926 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1855 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 1927 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1856 ha->flags.process_response_queue = 1; 1928 vha->flags.process_response_queue = 1;
1857 } 1929 }
1858 } 1930 }
1859 1931
1860 if (rval) { 1932 if (rval) {
1861 DEBUG2_3(printk(KERN_WARNING 1933 DEBUG2_3(printk(KERN_WARNING
1862 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 1934 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
1863 } 1935 }
1864 return (rval); 1936 return (rval);
1865} 1937}
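The NVRAM validity test earlier in this function sums every byte of the image into an 8-bit accumulator; a good image carries a checksum byte chosen so the total wraps to zero, which is why any nonzero chksum forces default parameters and logs HW_EVENT_NVRAM_CHKSUM_ERR. A sketch of producing and verifying that kind of additive checksum:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fill the last byte so the 8-bit sum of the whole image is 0. */
static void nvram_seal_sketch(uint8_t *img, size_t len)
{
        uint8_t sum = 0;

        img[len - 1] = 0;
        for (size_t i = 0; i < len; i++)
                sum += img[i];
        img[len - 1] = (uint8_t)(0x100 - sum);
}

/* Mirrors the patch's loop: accumulate all bytes; nonzero => bad. */
static uint8_t nvram_chksum_sketch(const uint8_t *img, size_t len)
{
        uint8_t chksum = 0;

        for (size_t i = 0; i < len; i++)
                chksum += img[i];
        return chksum;
}

int main(void)
{
        uint8_t img[256];

        memset(img, 0xa5, sizeof(img));
        nvram_seal_sketch(img, sizeof(img));

        printf("chksum=%u (%s)\n",
            nvram_chksum_sketch(img, sizeof(img)),
            nvram_chksum_sketch(img, sizeof(img)) ? "bad" : "ok");
        return 0;
}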
@@ -1870,10 +1942,10 @@ qla2x00_rport_del(void *data)
1870 fc_port_t *fcport = data; 1942 fc_port_t *fcport = data;
1871 struct fc_rport *rport; 1943 struct fc_rport *rport;
1872 1944
1873 spin_lock_irq(fcport->ha->host->host_lock); 1945 spin_lock_irq(fcport->vha->host->host_lock);
1874 rport = fcport->drport; 1946 rport = fcport->drport;
1875 fcport->drport = NULL; 1947 fcport->drport = NULL;
1876 spin_unlock_irq(fcport->ha->host->host_lock); 1948 spin_unlock_irq(fcport->vha->host->host_lock);
1877 if (rport) 1949 if (rport)
1878 fc_remote_port_delete(rport); 1950 fc_remote_port_delete(rport);
1879} 1951}
@@ -1886,7 +1958,7 @@ qla2x00_rport_del(void *data)
1886 * Returns a pointer to the allocated fcport, or NULL, if none available. 1958 * Returns a pointer to the allocated fcport, or NULL, if none available.
1887 */ 1959 */
1888static fc_port_t * 1960static fc_port_t *
1889qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) 1961qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1890{ 1962{
1891 fc_port_t *fcport; 1963 fc_port_t *fcport;
1892 1964
@@ -1895,8 +1967,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1895 return NULL; 1967 return NULL;
1896 1968
1897 /* Setup fcport template structure. */ 1969 /* Setup fcport template structure. */
1898 fcport->ha = ha; 1970 fcport->vha = vha;
1899 fcport->vp_idx = ha->vp_idx; 1971 fcport->vp_idx = vha->vp_idx;
1900 fcport->port_type = FCT_UNKNOWN; 1972 fcport->port_type = FCT_UNKNOWN;
1901 fcport->loop_id = FC_NO_LOOP_ID; 1973 fcport->loop_id = FC_NO_LOOP_ID;
1902 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1974 atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1919,101 +1991,97 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1919 * 2 = database was full and device was not configured. 1991 * 2 = database was full and device was not configured.
1920 */ 1992 */
1921static int 1993static int
1922qla2x00_configure_loop(scsi_qla_host_t *ha) 1994qla2x00_configure_loop(scsi_qla_host_t *vha)
1923{ 1995{
1924 int rval; 1996 int rval;
1925 unsigned long flags, save_flags; 1997 unsigned long flags, save_flags;
1926 1998 struct qla_hw_data *ha = vha->hw;
1927 rval = QLA_SUCCESS; 1999 rval = QLA_SUCCESS;
1928 2000
1929 /* Get Initiator ID */ 2001 /* Get Initiator ID */
1930 if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) { 2002 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
1931 rval = qla2x00_configure_hba(ha); 2003 rval = qla2x00_configure_hba(vha);
1932 if (rval != QLA_SUCCESS) { 2004 if (rval != QLA_SUCCESS) {
1933 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2005 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
1934 ha->host_no)); 2006 vha->host_no));
1935 return (rval); 2007 return (rval);
1936 } 2008 }
1937 } 2009 }
1938 2010
1939 save_flags = flags = ha->dpc_flags; 2011 save_flags = flags = vha->dpc_flags;
1940 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2012 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
1941 ha->host_no, flags)); 2013 vha->host_no, flags));
1942 2014
1943 /* 2015 /*
1944 * If we have both an RSCN and PORT UPDATE pending then handle them 2016 * If we have both an RSCN and PORT UPDATE pending then handle them
1945 * both at the same time. 2017 * both at the same time.
1946 */ 2018 */
1947 clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2019 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1948 clear_bit(RSCN_UPDATE, &ha->dpc_flags); 2020 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1949 2021
1950 /* Determine what we need to do */ 2022 /* Determine what we need to do */
1951 if (ha->current_topology == ISP_CFG_FL && 2023 if (ha->current_topology == ISP_CFG_FL &&
1952 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2024 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1953 2025
1954 ha->flags.rscn_queue_overflow = 1; 2026 vha->flags.rscn_queue_overflow = 1;
1955 set_bit(RSCN_UPDATE, &flags); 2027 set_bit(RSCN_UPDATE, &flags);
1956 2028
1957 } else if (ha->current_topology == ISP_CFG_F && 2029 } else if (ha->current_topology == ISP_CFG_F &&
1958 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2030 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1959 2031
1960 ha->flags.rscn_queue_overflow = 1; 2032 vha->flags.rscn_queue_overflow = 1;
1961 set_bit(RSCN_UPDATE, &flags); 2033 set_bit(RSCN_UPDATE, &flags);
1962 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2034 clear_bit(LOCAL_LOOP_UPDATE, &flags);
1963 2035
1964 } else if (ha->current_topology == ISP_CFG_N) { 2036 } else if (ha->current_topology == ISP_CFG_N) {
1965 clear_bit(RSCN_UPDATE, &flags); 2037 clear_bit(RSCN_UPDATE, &flags);
1966 2038
1967 } else if (!ha->flags.online || 2039 } else if (!vha->flags.online ||
1968 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2040 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1969 2041
1970 ha->flags.rscn_queue_overflow = 1; 2042 vha->flags.rscn_queue_overflow = 1;
1971 set_bit(RSCN_UPDATE, &flags); 2043 set_bit(RSCN_UPDATE, &flags);
1972 set_bit(LOCAL_LOOP_UPDATE, &flags); 2044 set_bit(LOCAL_LOOP_UPDATE, &flags);
1973 } 2045 }
1974 2046
1975 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2047 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
1976 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2048 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1977 rval = QLA_FUNCTION_FAILED; 2049 rval = QLA_FUNCTION_FAILED;
1978 } else { 2050 else
1979 rval = qla2x00_configure_local_loop(ha); 2051 rval = qla2x00_configure_local_loop(vha);
1980 }
1981 } 2052 }
1982 2053
1983 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2054 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
1984 if (LOOP_TRANSITION(ha)) { 2055 if (LOOP_TRANSITION(vha))
1985 rval = QLA_FUNCTION_FAILED; 2056 rval = QLA_FUNCTION_FAILED;
1986 } else { 2057 else
1987 rval = qla2x00_configure_fabric(ha); 2058 rval = qla2x00_configure_fabric(vha);
1988 }
1989 } 2059 }
1990 2060
1991 if (rval == QLA_SUCCESS) { 2061 if (rval == QLA_SUCCESS) {
1992 if (atomic_read(&ha->loop_down_timer) || 2062 if (atomic_read(&vha->loop_down_timer) ||
1993 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2063 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1994 rval = QLA_FUNCTION_FAILED; 2064 rval = QLA_FUNCTION_FAILED;
1995 } else { 2065 } else {
1996 atomic_set(&ha->loop_state, LOOP_READY); 2066 atomic_set(&vha->loop_state, LOOP_READY);
1997 2067
1998 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no)); 2068 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
1999 } 2069 }
2000 } 2070 }
2001 2071
2002 if (rval) { 2072 if (rval) {
2003 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2073 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2004 __func__, ha->host_no)); 2074 __func__, vha->host_no));
2005 } else { 2075 } else {
2006 DEBUG3(printk("%s: exiting normally\n", __func__)); 2076 DEBUG3(printk("%s: exiting normally\n", __func__));
2007 } 2077 }
2008 2078
2009 /* Restore state if a resync event occurred during processing */ 2079 /* Restore state if a resync event occurred during processing */
2010 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2080 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2011 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2081 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2012 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2082 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2013 if (test_bit(RSCN_UPDATE, &save_flags)) { 2083 if (test_bit(RSCN_UPDATE, &save_flags))
2014 ha->flags.rscn_queue_overflow = 1; 2084 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2015 set_bit(RSCN_UPDATE, &ha->dpc_flags);
2016 }
2017 } 2085 }
2018 2086
2019 return (rval); 2087 return (rval);
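qla2x00_configure_loop() works on a snapshot of the pending-work bits: it copies dpc_flags into flags/save_flags, clears LOCAL_LOOP_UPDATE and RSCN_UPDATE from the live word so new events can re-arm them, and, if LOOP_RESYNC_NEEDED fired mid-scan, re-posts the saved bits at the end so no work is lost. A userspace model of that snapshot-process-repost pattern, with plain bit operations standing in for the kernel's atomic test_bit/set_bit/clear_bit:

#include <stdio.h>

#define LOCAL_LOOP_UPDATE  (1ul << 0)
#define RSCN_UPDATE        (1ul << 1)
#define LOOP_RESYNC_NEEDED (1ul << 2)

int main(void)
{
        unsigned long dpc_flags = LOCAL_LOOP_UPDATE | RSCN_UPDATE;

        /* Snapshot, then clear the live bits we are about to handle. */
        unsigned long save_flags = dpc_flags;
        unsigned long flags = dpc_flags;

        dpc_flags &= ~(LOCAL_LOOP_UPDATE | RSCN_UPDATE);

        if (flags & LOCAL_LOOP_UPDATE)
                printf("scanning local loop...\n");
        if (flags & RSCN_UPDATE)
                printf("handling RSCNs...\n");

        /* A resync event arrives while we were busy. */
        dpc_flags |= LOOP_RESYNC_NEEDED;

        /* Restore: repost the snapshot so the next pass redoes it. */
        if (dpc_flags & LOOP_RESYNC_NEEDED)
                dpc_flags |= save_flags &
                    (LOCAL_LOOP_UPDATE | RSCN_UPDATE);

        printf("dpc_flags=0x%lx\n", dpc_flags);
        return 0;
}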
@@ -2032,7 +2100,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2032 * 0 = success. 2100 * 0 = success.
2033 */ 2101 */
2034static int 2102static int
2035qla2x00_configure_local_loop(scsi_qla_host_t *ha) 2103qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2036{ 2104{
2037 int rval, rval2; 2105 int rval, rval2;
2038 int found_devs; 2106 int found_devs;
@@ -2044,18 +2112,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2044 char *id_iter; 2112 char *id_iter;
2045 uint16_t loop_id; 2113 uint16_t loop_id;
2046 uint8_t domain, area, al_pa; 2114 uint8_t domain, area, al_pa;
2047 scsi_qla_host_t *pha = to_qla_parent(ha); 2115 struct qla_hw_data *ha = vha->hw;
2048 2116
2049 found_devs = 0; 2117 found_devs = 0;
2050 new_fcport = NULL; 2118 new_fcport = NULL;
2051 entries = MAX_FIBRE_DEVICES; 2119 entries = MAX_FIBRE_DEVICES;
2052 2120
2053 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no)); 2121 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2054 DEBUG3(qla2x00_get_fcal_position_map(ha, NULL)); 2122 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2055 2123
2056 /* Get list of logged in devices. */ 2124 /* Get list of logged in devices. */
2057 memset(ha->gid_list, 0, GID_LIST_SIZE); 2125 memset(ha->gid_list, 0, GID_LIST_SIZE);
2058 rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma, 2126 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2059 &entries); 2127 &entries);
2060 if (rval != QLA_SUCCESS) 2128 if (rval != QLA_SUCCESS)
2061 goto cleanup_allocation; 2129 goto cleanup_allocation;
@@ -2066,7 +2134,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2066 entries * sizeof(struct gid_list_info))); 2134 entries * sizeof(struct gid_list_info)));
2067 2135
2068 /* Allocate temporary fcport for any new fcports discovered. */ 2136 /* Allocate temporary fcport for any new fcports discovered. */
2069 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2137 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2070 if (new_fcport == NULL) { 2138 if (new_fcport == NULL) {
2071 rval = QLA_MEMORY_ALLOC_FAILED; 2139 rval = QLA_MEMORY_ALLOC_FAILED;
2072 goto cleanup_allocation; 2140 goto cleanup_allocation;
@@ -2076,17 +2144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2076 /* 2144 /*
2077 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2145 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2078 */ 2146 */
2079 list_for_each_entry(fcport, &pha->fcports, list) { 2147 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2080 if (fcport->vp_idx != ha->vp_idx)
2081 continue;
2082
2083 if (atomic_read(&fcport->state) == FCS_ONLINE && 2148 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2084 fcport->port_type != FCT_BROADCAST && 2149 fcport->port_type != FCT_BROADCAST &&
2085 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2150 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2086 2151
2087 DEBUG(printk("scsi(%ld): Marking port lost, " 2152 DEBUG(printk("scsi(%ld): Marking port lost, "
2088 "loop_id=0x%04x\n", 2153 "loop_id=0x%04x\n",
2089 ha->host_no, fcport->loop_id)); 2154 vha->host_no, fcport->loop_id));
2090 2155
2091 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2156 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2092 fcport->flags &= ~FCF_FARP_DONE; 2157 fcport->flags &= ~FCF_FARP_DONE;
@@ -2113,7 +2178,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2113 2178
2114 /* Bypass if not same domain and area of adapter. */ 2179 /* Bypass if not same domain and area of adapter. */
2115 if (area && domain && 2180 if (area && domain &&
2116 (area != ha->d_id.b.area || domain != ha->d_id.b.domain)) 2181 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2117 continue; 2182 continue;
2118 2183
2119 /* Bypass invalid local loop ID. */ 2184 /* Bypass invalid local loop ID. */
@@ -2125,26 +2190,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2125 new_fcport->d_id.b.area = area; 2190 new_fcport->d_id.b.area = area;
2126 new_fcport->d_id.b.al_pa = al_pa; 2191 new_fcport->d_id.b.al_pa = al_pa;
2127 new_fcport->loop_id = loop_id; 2192 new_fcport->loop_id = loop_id;
2128 new_fcport->vp_idx = ha->vp_idx; 2193 new_fcport->vp_idx = vha->vp_idx;
2129 rval2 = qla2x00_get_port_database(ha, new_fcport, 0); 2194 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2130 if (rval2 != QLA_SUCCESS) { 2195 if (rval2 != QLA_SUCCESS) {
2131 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2196 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2132 "information -- get_port_database=%x, " 2197 "information -- get_port_database=%x, "
2133 "loop_id=0x%04x\n", 2198 "loop_id=0x%04x\n",
2134 ha->host_no, rval2, new_fcport->loop_id)); 2199 vha->host_no, rval2, new_fcport->loop_id));
2135 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2200 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2136 ha->host_no)); 2201 vha->host_no));
2137 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 2202 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2138 continue; 2203 continue;
2139 } 2204 }
2140 2205
2141 /* Check for matching device in port list. */ 2206 /* Check for matching device in port list. */
2142 found = 0; 2207 found = 0;
2143 fcport = NULL; 2208 fcport = NULL;
2144 list_for_each_entry(fcport, &pha->fcports, list) { 2209 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2145 if (fcport->vp_idx != ha->vp_idx)
2146 continue;
2147
2148 if (memcmp(new_fcport->port_name, fcport->port_name, 2210 if (memcmp(new_fcport->port_name, fcport->port_name,
2149 WWN_SIZE)) 2211 WWN_SIZE))
2150 continue; 2212 continue;
@@ -2164,17 +2226,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2164 if (!found) { 2226 if (!found) {
2165 /* New device, add to fcports list. */ 2227 /* New device, add to fcports list. */
2166 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2228 new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
2167 if (ha->parent) { 2229 if (vha->vp_idx) {
2168 new_fcport->ha = ha; 2230 new_fcport->vha = vha;
2169 new_fcport->vp_idx = ha->vp_idx; 2231 new_fcport->vp_idx = vha->vp_idx;
2170 list_add_tail(&new_fcport->vp_fcport,
2171 &ha->vp_fcports);
2172 } 2232 }
2173 list_add_tail(&new_fcport->list, &pha->fcports); 2233 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2174 2234
2175 /* Allocate a new replacement fcport. */ 2235 /* Allocate a new replacement fcport. */
2176 fcport = new_fcport; 2236 fcport = new_fcport;
2177 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2237 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2178 if (new_fcport == NULL) { 2238 if (new_fcport == NULL) {
2179 rval = QLA_MEMORY_ALLOC_FAILED; 2239 rval = QLA_MEMORY_ALLOC_FAILED;
2180 goto cleanup_allocation; 2240 goto cleanup_allocation;
@@ -2185,7 +2245,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2185 /* Base iIDMA settings on HBA port speed. */ 2245 /* Base iIDMA settings on HBA port speed. */
2186 fcport->fp_speed = ha->link_data_rate; 2246 fcport->fp_speed = ha->link_data_rate;
2187 2247
2188 qla2x00_update_fcport(ha, fcport); 2248 qla2x00_update_fcport(vha, fcport);
2189 2249
2190 found_devs++; 2250 found_devs++;
2191 } 2251 }
@@ -2195,24 +2255,25 @@ cleanup_allocation:
2195 2255
2196 if (rval != QLA_SUCCESS) { 2256 if (rval != QLA_SUCCESS) {
2197 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2257 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2198 "rval=%x\n", ha->host_no, rval)); 2258 "rval=%x\n", vha->host_no, rval));
2199 } 2259 }
2200 2260
2201 if (found_devs) { 2261 if (found_devs) {
2202 ha->device_flags |= DFLG_LOCAL_DEVICES; 2262 vha->device_flags |= DFLG_LOCAL_DEVICES;
2203 ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES; 2263 vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
2204 } 2264 }
2205 2265
2206 return (rval); 2266 return (rval);
2207} 2267}
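The list changes in qla2x00_configure_local_loop() are the core simplification of the patch: instead of walking the physical port's shared fcports list via to_qla_parent() and skipping every entry whose vp_idx does not match, each scsi_qla_host now owns a vp_fcports list, so iteration needs no filter at all. A sketch of the before/after shape, modeled with a plain singly linked list rather than the kernel's list.h machinery:

#include <stdio.h>

struct fcport_sketch {
        int vp_idx;
        int loop_id;
        struct fcport_sketch *next;
};

/* Old shape: one shared list, filter by vp_idx on every walk. */
static void walk_shared(struct fcport_sketch *shared, int vp_idx)
{
        for (struct fcport_sketch *f = shared; f; f = f->next) {
                if (f->vp_idx != vp_idx)
                        continue;       /* the filter the patch removes */
                printf("shared walk: loop_id=0x%x\n", f->loop_id);
        }
}

/* New shape: each port owns its list; no filtering needed. */
static void walk_own(struct fcport_sketch *vp_fcports)
{
        for (struct fcport_sketch *f = vp_fcports; f; f = f->next)
                printf("own walk:    loop_id=0x%x\n", f->loop_id);
}

int main(void)
{
        struct fcport_sketch b = { .vp_idx = 1, .loop_id = 0x82 };
        struct fcport_sketch a = { .vp_idx = 0, .loop_id = 0x81,
                                   .next = &b };

        walk_shared(&a, 1);     /* visits both entries, keeps one */
        walk_own(&b);           /* visits exactly its own entries */
        return 0;
}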
2208 2268
2209static void 2269static void
2210qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2270qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2211{ 2271{
2212#define LS_UNKNOWN 2 2272#define LS_UNKNOWN 2
2213 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2273 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
2214 int rval; 2274 int rval;
2215 uint16_t mb[6]; 2275 uint16_t mb[6];
2276 struct qla_hw_data *ha = vha->hw;
2216 2277
2217 if (!IS_IIDMA_CAPABLE(ha)) 2278 if (!IS_IIDMA_CAPABLE(ha))
2218 return; 2279 return;
@@ -2221,12 +2282,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2221 fcport->fp_speed > ha->link_data_rate) 2282 fcport->fp_speed > ha->link_data_rate)
2222 return; 2283 return;
2223 2284
2224 rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed, 2285 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2225 mb); 2286 mb);
2226 if (rval != QLA_SUCCESS) { 2287 if (rval != QLA_SUCCESS) {
2227 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2288 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2228 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2289 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2229 ha->host_no, fcport->port_name[0], fcport->port_name[1], 2290 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2230 fcport->port_name[2], fcport->port_name[3], 2291 fcport->port_name[2], fcport->port_name[3],
2231 fcport->port_name[4], fcport->port_name[5], 2292 fcport->port_name[4], fcport->port_name[5],
2232 fcport->port_name[6], fcport->port_name[7], rval, 2293 fcport->port_name[6], fcport->port_name[7], rval,
@@ -2244,10 +2305,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2244} 2305}
2245 2306
2246static void 2307static void
2247qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport) 2308qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2248{ 2309{
2249 struct fc_rport_identifiers rport_ids; 2310 struct fc_rport_identifiers rport_ids;
2250 struct fc_rport *rport; 2311 struct fc_rport *rport;
2312 struct qla_hw_data *ha = vha->hw;
2251 2313
2252 if (fcport->drport) 2314 if (fcport->drport)
2253 qla2x00_rport_del(fcport); 2315 qla2x00_rport_del(fcport);
@@ -2257,15 +2319,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2257 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2319 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2258 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2320 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2259 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2321 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2260 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2322 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2261 if (!rport) { 2323 if (!rport) {
2262 qla_printk(KERN_WARNING, ha, 2324 qla_printk(KERN_WARNING, ha,
2263 "Unable to allocate fc remote port!\n"); 2325 "Unable to allocate fc remote port!\n");
2264 return; 2326 return;
2265 } 2327 }
2266 spin_lock_irq(fcport->ha->host->host_lock); 2328 spin_lock_irq(fcport->vha->host->host_lock);
2267 *((fc_port_t **)rport->dd_data) = fcport; 2329 *((fc_port_t **)rport->dd_data) = fcport;
2268 spin_unlock_irq(fcport->ha->host->host_lock); 2330 spin_unlock_irq(fcport->vha->host->host_lock);
2269 2331
2270 rport->supported_classes = fcport->supported_classes; 2332 rport->supported_classes = fcport->supported_classes;
2271 2333
@@ -2293,23 +2355,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2293 * Kernel context. 2355 * Kernel context.
2294 */ 2356 */
2295void 2357void
2296qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2358qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2297{ 2359{
2298 scsi_qla_host_t *pha = to_qla_parent(ha); 2360 struct qla_hw_data *ha = vha->hw;
2299 2361
2300 fcport->ha = ha; 2362 fcport->vha = vha;
2301 fcport->login_retry = 0; 2363 fcport->login_retry = 0;
2302 fcport->port_login_retry_count = pha->port_down_retry_count * 2364 fcport->port_login_retry_count = ha->port_down_retry_count *
2303 PORT_RETRY_TIME; 2365 PORT_RETRY_TIME;
2304 atomic_set(&fcport->port_down_timer, pha->port_down_retry_count * 2366 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2305 PORT_RETRY_TIME); 2367 PORT_RETRY_TIME);
2306 fcport->flags &= ~FCF_LOGIN_NEEDED; 2368 fcport->flags &= ~FCF_LOGIN_NEEDED;
2307 2369
2308 qla2x00_iidma_fcport(ha, fcport); 2370 qla2x00_iidma_fcport(vha, fcport);
2309 2371
2310 atomic_set(&fcport->state, FCS_ONLINE); 2372 atomic_set(&fcport->state, FCS_ONLINE);
2311 2373
2312 qla2x00_reg_remote_port(ha, fcport); 2374 qla2x00_reg_remote_port(vha, fcport);
2313} 2375}
2314 2376
2315/* 2377/*
@@ -2324,7 +2386,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2324 * BIT_0 = error 2386 * BIT_0 = error
2325 */ 2387 */
2326static int 2388static int
2327qla2x00_configure_fabric(scsi_qla_host_t *ha) 2389qla2x00_configure_fabric(scsi_qla_host_t *vha)
2328{ 2390{
2329 int rval, rval2; 2391 int rval, rval2;
2330 fc_port_t *fcport, *fcptemp; 2392 fc_port_t *fcport, *fcptemp;
@@ -2332,25 +2394,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2332 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2394 uint16_t mb[MAILBOX_REGISTER_COUNT];
2333 uint16_t loop_id; 2395 uint16_t loop_id;
2334 LIST_HEAD(new_fcports); 2396 LIST_HEAD(new_fcports);
2335 scsi_qla_host_t *pha = to_qla_parent(ha); 2397 struct qla_hw_data *ha = vha->hw;
2398 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2336 2399
2337 /* If FL port exists, then SNS is present */ 2400 /* If FL port exists, then SNS is present */
2338 if (IS_FWI2_CAPABLE(ha)) 2401 if (IS_FWI2_CAPABLE(ha))
2339 loop_id = NPH_F_PORT; 2402 loop_id = NPH_F_PORT;
2340 else 2403 else
2341 loop_id = SNS_FL_PORT; 2404 loop_id = SNS_FL_PORT;
2342 rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1); 2405 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2343 if (rval != QLA_SUCCESS) { 2406 if (rval != QLA_SUCCESS) {
2344 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2407 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2345 "Port\n", ha->host_no)); 2408 "Port\n", vha->host_no));
2346 2409
2347 ha->device_flags &= ~SWITCH_FOUND; 2410 vha->device_flags &= ~SWITCH_FOUND;
2348 return (QLA_SUCCESS); 2411 return (QLA_SUCCESS);
2349 } 2412 }
2350 ha->device_flags |= SWITCH_FOUND; 2413 vha->device_flags |= SWITCH_FOUND;
2351 2414
2352 /* Mark devices that need re-synchronization. */ 2415 /* Mark devices that need re-synchronization. */
2353 rval2 = qla2x00_device_resync(ha); 2416 rval2 = qla2x00_device_resync(vha);
2354 if (rval2 == QLA_RSCNS_HANDLED) { 2417 if (rval2 == QLA_RSCNS_HANDLED) {
2355 /* No point doing the scan, just continue. */ 2418 /* No point doing the scan, just continue. */
2356 return (QLA_SUCCESS); 2419 return (QLA_SUCCESS);
@@ -2358,15 +2421,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2358 do { 2421 do {
2359 /* FDMI support. */ 2422 /* FDMI support. */
2360 if (ql2xfdmienable && 2423 if (ql2xfdmienable &&
2361 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags)) 2424 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2362 qla2x00_fdmi_register(ha); 2425 qla2x00_fdmi_register(vha);
2363 2426
2364 /* Ensure we are logged into the SNS. */ 2427 /* Ensure we are logged into the SNS. */
2365 if (IS_FWI2_CAPABLE(ha)) 2428 if (IS_FWI2_CAPABLE(ha))
2366 loop_id = NPH_SNS; 2429 loop_id = NPH_SNS;
2367 else 2430 else
2368 loop_id = SIMPLE_NAME_SERVER; 2431 loop_id = SIMPLE_NAME_SERVER;
2369 ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff, 2432 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2370 0xfc, mb, BIT_1 | BIT_0); 2433 0xfc, mb, BIT_1 | BIT_0);
2371 if (mb[0] != MBS_COMMAND_COMPLETE) { 2434 if (mb[0] != MBS_COMMAND_COMPLETE) {
2372 DEBUG2(qla_printk(KERN_INFO, ha, 2435 DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2376,29 +2439,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2376 return (QLA_SUCCESS); 2439 return (QLA_SUCCESS);
2377 } 2440 }
2378 2441
2379 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) { 2442 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2380 if (qla2x00_rft_id(ha)) { 2443 if (qla2x00_rft_id(vha)) {
2381 /* EMPTY */ 2444 /* EMPTY */
2382 DEBUG2(printk("scsi(%ld): Register FC-4 " 2445 DEBUG2(printk("scsi(%ld): Register FC-4 "
2383 "TYPE failed.\n", ha->host_no)); 2446 "TYPE failed.\n", vha->host_no));
2384 } 2447 }
2385 if (qla2x00_rff_id(ha)) { 2448 if (qla2x00_rff_id(vha)) {
2386 /* EMPTY */ 2449 /* EMPTY */
2387 DEBUG2(printk("scsi(%ld): Register FC-4 " 2450 DEBUG2(printk("scsi(%ld): Register FC-4 "
2388 "Features failed.\n", ha->host_no)); 2451 "Features failed.\n", vha->host_no));
2389 } 2452 }
2390 if (qla2x00_rnn_id(ha)) { 2453 if (qla2x00_rnn_id(vha)) {
2391 /* EMPTY */ 2454 /* EMPTY */
2392 DEBUG2(printk("scsi(%ld): Register Node Name " 2455 DEBUG2(printk("scsi(%ld): Register Node Name "
2393 "failed.\n", ha->host_no)); 2456 "failed.\n", vha->host_no));
2394 } else if (qla2x00_rsnn_nn(ha)) { 2457 } else if (qla2x00_rsnn_nn(vha)) {
2395 /* EMPTY */ 2458 /* EMPTY */
2396 DEBUG2(printk("scsi(%ld): Register Symbolic " 2459 DEBUG2(printk("scsi(%ld): Register Symbolic "
2397 "Node Name failed.\n", ha->host_no)); 2460 "Node Name failed.\n", vha->host_no));
2398 } 2461 }
2399 } 2462 }
2400 2463
2401 rval = qla2x00_find_all_fabric_devs(ha, &new_fcports); 2464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2402 if (rval != QLA_SUCCESS) 2465 if (rval != QLA_SUCCESS)
2403 break; 2466 break;
2404 2467
@@ -2406,24 +2469,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2406 * Logout all previous fabric devices marked lost, except 2469 * Logout all previous fabric devices marked lost, except
2407 * tape devices. 2470 * tape devices.
2408 */ 2471 */
2409 list_for_each_entry(fcport, &pha->fcports, list) { 2472 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2410 if (fcport->vp_idx !=ha->vp_idx) 2473 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2411 continue;
2412
2413 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2414 break; 2474 break;
2415 2475
2416 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2417 continue; 2477 continue;
2418 2478
2419 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2479 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2420 qla2x00_mark_device_lost(ha, fcport, 2480 qla2x00_mark_device_lost(vha, fcport,
2421 ql2xplogiabsentdevice, 0); 2481 ql2xplogiabsentdevice, 0);
2422 if (fcport->loop_id != FC_NO_LOOP_ID && 2482 if (fcport->loop_id != FC_NO_LOOP_ID &&
2423 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2483 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2424 fcport->port_type != FCT_INITIATOR && 2484 fcport->port_type != FCT_INITIATOR &&
2425 fcport->port_type != FCT_BROADCAST) { 2485 fcport->port_type != FCT_BROADCAST) {
2426 ha->isp_ops->fabric_logout(ha, 2486 ha->isp_ops->fabric_logout(vha,
2427 fcport->loop_id, 2487 fcport->loop_id,
2428 fcport->d_id.b.domain, 2488 fcport->d_id.b.domain,
2429 fcport->d_id.b.area, 2489 fcport->d_id.b.area,
@@ -2434,18 +2494,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2434 } 2494 }
2435 2495
2436 /* Starting free loop ID. */ 2496 /* Starting free loop ID. */
2437 next_loopid = pha->min_external_loopid; 2497 next_loopid = ha->min_external_loopid;
2438 2498
2439 /* 2499 /*
2440 * Scan through our port list and login entries that need to be 2500 * Scan through our port list and login entries that need to be
2441 * logged in. 2501 * logged in.
2442 */ 2502 */
2443 list_for_each_entry(fcport, &pha->fcports, list) { 2503 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2444 if (fcport->vp_idx != ha->vp_idx) 2504 if (atomic_read(&vha->loop_down_timer) ||
2445 continue; 2505 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2446
2447 if (atomic_read(&ha->loop_down_timer) ||
2448 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2449 break; 2506 break;
2450 2507
2451 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2508 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2455,14 +2512,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2455 if (fcport->loop_id == FC_NO_LOOP_ID) { 2512 if (fcport->loop_id == FC_NO_LOOP_ID) {
2456 fcport->loop_id = next_loopid; 2513 fcport->loop_id = next_loopid;
2457 rval = qla2x00_find_new_loop_id( 2514 rval = qla2x00_find_new_loop_id(
2458 to_qla_parent(ha), fcport); 2515 base_vha, fcport);
2459 if (rval != QLA_SUCCESS) { 2516 if (rval != QLA_SUCCESS) {
2460 /* Ran out of IDs to use */ 2517 /* Ran out of IDs to use */
2461 break; 2518 break;
2462 } 2519 }
2463 } 2520 }
2464 /* Login and update database */ 2521 /* Login and update database */
2465 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2522 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2466 } 2523 }
2467 2524
2468 /* Exit if out of loop IDs. */ 2525 /* Exit if out of loop IDs. */
@@ -2474,31 +2531,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2474 * Login and add the new devices to our port list. 2531 * Login and add the new devices to our port list.
2475 */ 2532 */
2476 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2533 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2477 if (atomic_read(&ha->loop_down_timer) || 2534 if (atomic_read(&vha->loop_down_timer) ||
2478 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2535 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2479 break; 2536 break;
2480 2537
2481 /* Find a new loop ID to use. */ 2538 /* Find a new loop ID to use. */
2482 fcport->loop_id = next_loopid; 2539 fcport->loop_id = next_loopid;
2483 rval = qla2x00_find_new_loop_id(to_qla_parent(ha), 2540 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2484 fcport);
2485 if (rval != QLA_SUCCESS) { 2541 if (rval != QLA_SUCCESS) {
2486 /* Ran out of IDs to use */ 2542 /* Ran out of IDs to use */
2487 break; 2543 break;
2488 } 2544 }
2489 2545
2490 /* Login and update database */ 2546 /* Login and update database */
2491 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2547 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2492 2548
2493 if (ha->parent) { 2549 if (vha->vp_idx) {
2494 fcport->ha = ha; 2550 fcport->vha = vha;
2495 fcport->vp_idx = ha->vp_idx; 2551 fcport->vp_idx = vha->vp_idx;
2496 list_add_tail(&fcport->vp_fcport, 2552 }
2497 &ha->vp_fcports); 2553 list_move_tail(&fcport->list, &vha->vp_fcports);
2498 list_move_tail(&fcport->list,
2499 &ha->parent->fcports);
2500 } else
2501 list_move_tail(&fcport->list, &ha->fcports);
2502 } 2554 }
2503 } while (0); 2555 } while (0);
2504 2556
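qla2x00_configure_fabric() wraps its whole SNS login/scan sequence in do { ... } while (0): the block runs exactly once, and every failure path can break straight to the common cleanup below it without a goto. A minimal sketch of the idiom:

#include <stdio.h>

static int step(int n, int ok)
{
        printf("step %d: %s\n", n, ok ? "ok" : "failed");
        return ok;
}

int main(void)
{
        int rval = -1;

        do {
                if (!step(1, 1))
                        break;          /* jump to cleanup */
                if (!step(2, 0))
                        break;          /* taken: step 2 fails */
                if (!step(3, 1))
                        break;          /* never reached */
                rval = 0;
        } while (0);

        printf("cleanup, rval=%d\n", rval);     /* always runs */
        return rval;
}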
@@ -2510,7 +2562,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2510 2562
2511 if (rval) { 2563 if (rval) {
2512 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2564 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2513 "rval=%d\n", ha->host_no, rval)); 2565 "rval=%d\n", vha->host_no, rval));
2514 } 2566 }
2515 2567
2516 return (rval); 2568 return (rval);
@@ -2531,7 +2583,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2531 * Kernel context. 2583 * Kernel context.
2532 */ 2584 */
2533static int 2585static int
2534qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports) 2586qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2587 struct list_head *new_fcports)
2535{ 2588{
2536 int rval; 2589 int rval;
2537 uint16_t loop_id; 2590 uint16_t loop_id;
@@ -2542,11 +2595,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2542 int swl_idx; 2595 int swl_idx;
2543 int first_dev, last_dev; 2596 int first_dev, last_dev;
2544 port_id_t wrap, nxt_d_id; 2597 port_id_t wrap, nxt_d_id;
2545 int vp_index; 2598 struct qla_hw_data *ha = vha->hw;
2546 int empty_vp_index; 2599 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2547 int found_vp;
2548 scsi_qla_host_t *vha;
2549 scsi_qla_host_t *pha = to_qla_parent(ha);
2550 2600
2551 rval = QLA_SUCCESS; 2601 rval = QLA_SUCCESS;
2552 2602
@@ -2555,43 +2605,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2555 if (!swl) { 2605 if (!swl) {
2556 /*EMPTY*/ 2606 /*EMPTY*/
2557 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2607 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2558 "on GA_NXT\n", ha->host_no)); 2608 "on GA_NXT\n", vha->host_no));
2559 } else { 2609 } else {
2560 if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) { 2610 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2561 kfree(swl); 2611 kfree(swl);
2562 swl = NULL; 2612 swl = NULL;
2563 } else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) { 2613 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2564 kfree(swl); 2614 kfree(swl);
2565 swl = NULL; 2615 swl = NULL;
2566 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2616 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2567 kfree(swl); 2617 kfree(swl);
2568 swl = NULL; 2618 swl = NULL;
2569 } else if (ql2xiidmaenable && 2619 } else if (ql2xiidmaenable &&
2570 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2620 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2571 qla2x00_gpsc(ha, swl); 2621 qla2x00_gpsc(vha, swl);
2572 } 2622 }
2573 } 2623 }
2574 swl_idx = 0; 2624 swl_idx = 0;
2575 2625
2576 /* Allocate temporary fcport for any new fcports discovered. */ 2626 /* Allocate temporary fcport for any new fcports discovered. */
2577 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2627 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2578 if (new_fcport == NULL) { 2628 if (new_fcport == NULL) {
2579 kfree(swl); 2629 kfree(swl);
2580 return (QLA_MEMORY_ALLOC_FAILED); 2630 return (QLA_MEMORY_ALLOC_FAILED);
2581 } 2631 }
2582 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2632 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2583 new_fcport->vp_idx = ha->vp_idx;
2584 /* Set start port ID scan at adapter ID. */ 2633 /* Set start port ID scan at adapter ID. */
2585 first_dev = 1; 2634 first_dev = 1;
2586 last_dev = 0; 2635 last_dev = 0;
2587 2636
2588 /* Starting free loop ID. */ 2637 /* Starting free loop ID. */
2589 loop_id = pha->min_external_loopid; 2638 loop_id = ha->min_external_loopid;
2590 for (; loop_id <= ha->last_loop_id; loop_id++) { 2639 for (; loop_id <= ha->max_loop_id; loop_id++) {
2591 if (qla2x00_is_reserved_id(ha, loop_id)) 2640 if (qla2x00_is_reserved_id(vha, loop_id))
2592 continue; 2641 continue;
2593 2642
2594 if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha)) 2643 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2595 break; 2644 break;
2596 2645
2597 if (swl != NULL) { 2646 if (swl != NULL) {
@@ -2614,7 +2663,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2614 } 2663 }
2615 } else { 2664 } else {
2616 /* Send GA_NXT to the switch */ 2665 /* Send GA_NXT to the switch */
2617 rval = qla2x00_ga_nxt(ha, new_fcport); 2666 rval = qla2x00_ga_nxt(vha, new_fcport);
2618 if (rval != QLA_SUCCESS) { 2667 if (rval != QLA_SUCCESS) {
2619 qla_printk(KERN_WARNING, ha, 2668 qla_printk(KERN_WARNING, ha,
2620 "SNS scan failed -- assuming zero-entry " 2669 "SNS scan failed -- assuming zero-entry "
@@ -2635,44 +2684,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2635 first_dev = 0; 2684 first_dev = 0;
2636 } else if (new_fcport->d_id.b24 == wrap.b24) { 2685 } else if (new_fcport->d_id.b24 == wrap.b24) {
2637 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2686 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2638 ha->host_no, new_fcport->d_id.b.domain, 2687 vha->host_no, new_fcport->d_id.b.domain,
2639 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2688 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2640 break; 2689 break;
2641 } 2690 }
2642 2691
2643 /* Bypass if same physical adapter. */ 2692 /* Bypass if same physical adapter. */
2644 if (new_fcport->d_id.b24 == pha->d_id.b24) 2693 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2645 continue; 2694 continue;
2646 2695
2647 /* Bypass virtual ports of the same host. */ 2696 /* Bypass virtual ports of the same host. */
2648 if (pha->num_vhosts) { 2697 found = 0;
2649 for_each_mapped_vp_idx(pha, vp_index) { 2698 if (ha->num_vhosts) {
2650 empty_vp_index = 1; 2699 list_for_each_entry(vp, &ha->vp_list, list) {
2651 found_vp = 0; 2700 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2652 list_for_each_entry(vha, &pha->vp_list, 2701 found = 1;
2653 vp_list) {
2654 if (vp_index == vha->vp_idx) {
2655 empty_vp_index = 0;
2656 found_vp = 1;
2657 break;
2658 }
2659 }
2660
2661 if (empty_vp_index)
2662 continue;
2663
2664 if (found_vp &&
2665 new_fcport->d_id.b24 == vha->d_id.b24)
2666 break; 2702 break;
2703 }
2667 } 2704 }
2668 2705 if (found)
2669 if (vp_index <= pha->max_npiv_vports)
2670 continue; 2706 continue;
2671 } 2707 }
2672 2708
2673 /* Bypass if same domain and area of adapter. */ 2709 /* Bypass if same domain and area of adapter. */
2674 if (((new_fcport->d_id.b24 & 0xffff00) == 2710 if (((new_fcport->d_id.b24 & 0xffff00) ==
2675 (ha->d_id.b24 & 0xffff00)) && ha->current_topology == 2711 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2676 ISP_CFG_FL) 2712 ISP_CFG_FL)
2677 continue; 2713 continue;
2678 2714
@@ -2682,9 +2718,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2682 2718
2683 /* Locate matching device in database. */ 2719 /* Locate matching device in database. */
2684 found = 0; 2720 found = 0;
2685 list_for_each_entry(fcport, &pha->fcports, list) { 2721 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2686 if (new_fcport->vp_idx != fcport->vp_idx)
2687 continue;
2688 if (memcmp(new_fcport->port_name, fcport->port_name, 2722 if (memcmp(new_fcport->port_name, fcport->port_name,
2689 WWN_SIZE)) 2723 WWN_SIZE))
2690 continue; 2724 continue;
@@ -2728,7 +2762,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2728 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2762 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2729 fcport->port_type != FCT_INITIATOR && 2763 fcport->port_type != FCT_INITIATOR &&
2730 fcport->port_type != FCT_BROADCAST) { 2764 fcport->port_type != FCT_BROADCAST) {
2731 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 2765 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2732 fcport->d_id.b.domain, fcport->d_id.b.area, 2766 fcport->d_id.b.domain, fcport->d_id.b.area,
2733 fcport->d_id.b.al_pa); 2767 fcport->d_id.b.al_pa);
2734 fcport->loop_id = FC_NO_LOOP_ID; 2768 fcport->loop_id = FC_NO_LOOP_ID;
@@ -2739,27 +2773,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2739 2773
2740 if (found) 2774 if (found)
2741 continue; 2775 continue;
2742
2743 /* If device was not in our fcports list, then add it. */ 2776 /* If device was not in our fcports list, then add it. */
2744 list_add_tail(&new_fcport->list, new_fcports); 2777 list_add_tail(&new_fcport->list, new_fcports);
2745 2778
2746 /* Allocate a new replacement fcport. */ 2779 /* Allocate a new replacement fcport. */
2747 nxt_d_id.b24 = new_fcport->d_id.b24; 2780 nxt_d_id.b24 = new_fcport->d_id.b24;
2748 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2781 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2749 if (new_fcport == NULL) { 2782 if (new_fcport == NULL) {
2750 kfree(swl); 2783 kfree(swl);
2751 return (QLA_MEMORY_ALLOC_FAILED); 2784 return (QLA_MEMORY_ALLOC_FAILED);
2752 } 2785 }
2753 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2786 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2754 new_fcport->d_id.b24 = nxt_d_id.b24; 2787 new_fcport->d_id.b24 = nxt_d_id.b24;
2755 new_fcport->vp_idx = ha->vp_idx;
2756 } 2788 }
2757 2789
2758 kfree(swl); 2790 kfree(swl);
2759 kfree(new_fcport); 2791 kfree(new_fcport);
2760 2792
2761 if (!list_empty(new_fcports)) 2793 if (!list_empty(new_fcports))
2762 ha->device_flags |= DFLG_FABRIC_DEVICES; 2794 vha->device_flags |= DFLG_FABRIC_DEVICES;
2763 2795
2764 return (rval); 2796 return (rval);
2765} 2797}
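Without a switch-supplied list (swl), qla2x00_find_all_fabric_devs() walks the name server one GA_NXT query at a time: each query returns the next registered port ID, the first ID returned is remembered (first_dev/wrap), and the scan stops when that ID comes back, meaning the walk wrapped around the whole database. A sketch of the stop-on-wrap loop against a toy name server:

#include <stdio.h>

/* Toy name server: returns the next 24-bit ID after 'cur', cycling
 * through a fixed registration table (GA_NXT analogue). */
static unsigned ga_nxt_sketch(unsigned cur)
{
        static const unsigned db[] = { 0x010200, 0x010300, 0x020100 };
        const int n = sizeof(db) / sizeof(db[0]);

        for (int i = 0; i < n; i++)
                if (db[i] > cur)
                        return db[i];
        return db[0];   /* wrapped past the highest ID */
}

int main(void)
{
        unsigned id = 0, wrap = 0;
        int first_dev = 1;

        for (;;) {
                id = ga_nxt_sketch(id);
                if (first_dev) {
                        wrap = id;      /* remember the first hit */
                        first_dev = 0;
                } else if (id == wrap) {
                        printf("device wrap (%06x)\n", id);
                        break;          /* scanned everything once */
                }
                printf("found port %06x\n", id);
        }
        return 0;
}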
@@ -2779,13 +2811,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2779 * Kernel context. 2811 * Kernel context.
2780 */ 2812 */
2781static int 2813static int
2782qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev) 2814qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2783{ 2815{
2784 int rval; 2816 int rval;
2785 int found; 2817 int found;
2786 fc_port_t *fcport; 2818 fc_port_t *fcport;
2787 uint16_t first_loop_id; 2819 uint16_t first_loop_id;
2788 scsi_qla_host_t *pha = to_qla_parent(ha); 2820 struct qla_hw_data *ha = vha->hw;
2821 struct scsi_qla_host *vp;
2789 2822
2790 rval = QLA_SUCCESS; 2823 rval = QLA_SUCCESS;
2791 2824
@@ -2794,17 +2827,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2794 2827
2795 for (;;) { 2828 for (;;) {
2796 /* Skip loop ID if already used by adapter. */ 2829 /* Skip loop ID if already used by adapter. */
2797 if (dev->loop_id == ha->loop_id) { 2830 if (dev->loop_id == vha->loop_id)
2798 dev->loop_id++; 2831 dev->loop_id++;
2799 }
2800 2832
2801 /* Skip reserved loop IDs. */ 2833 /* Skip reserved loop IDs. */
2802 while (qla2x00_is_reserved_id(ha, dev->loop_id)) { 2834 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2803 dev->loop_id++; 2835 dev->loop_id++;
2804 }
2805 2836
2806 /* Reset loop ID if passed the end. */ 2837 /* Reset loop ID if passed the end. */
2807 if (dev->loop_id > ha->last_loop_id) { 2838 if (dev->loop_id > ha->max_loop_id) {
2808 /* first loop ID. */ 2839 /* first loop ID. */
2809 dev->loop_id = ha->min_external_loopid; 2840 dev->loop_id = ha->min_external_loopid;
2810 } 2841 }
@@ -2812,12 +2843,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2812 /* Check for loop ID being already in use. */ 2843 /* Check for loop ID being already in use. */
2813 found = 0; 2844 found = 0;
2814 fcport = NULL; 2845 fcport = NULL;
2815 list_for_each_entry(fcport, &pha->fcports, list) { 2846 list_for_each_entry(vp, &ha->vp_list, list) {
2816 if (fcport->loop_id == dev->loop_id && fcport != dev) { 2847 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2817 /* ID possibly in use */ 2848 if (fcport->loop_id == dev->loop_id &&
2818 found++; 2849 fcport != dev) {
2819 break; 2850 /* ID possibly in use */
2851 found++;
2852 break;
2853 }
2820 } 2854 }
2855 if (found)
2856 break;
2821 } 2857 }
2822 2858
2823 /* If not in use then it is free to use. */ 2859 /* If not in use then it is free to use. */
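
[Editor's note, not part of the patch: the hunk above replaces the old flat scan of the parent host's fcports list with a nested walk over every vport's vp_fcports list hanging off the shared qla_hw_data. The following is a minimal, self-contained C sketch of that loop-ID allocation pattern; the toy types, limits, and names are invented for illustration and are not the driver's real definitions.]

/* Toy model of loop-ID allocation across all virtual ports
 * (hypothetical types; compile with: cc -std=c99 loopid.c). */
#include <stdio.h>

#define MAX_LOOP_ID 8
#define MAX_VPS     2
#define MAX_PORTS   4

struct fc_port { int loop_id; };
struct vport   { struct fc_port ports[MAX_PORTS]; int nports; };
struct hw_data { struct vport vps[MAX_VPS]; int nvps; };

/* Return 1 if any port on any vport already owns this loop ID,
 * mirroring the nested list_for_each_entry() walk in the hunk. */
static int loop_id_in_use(const struct hw_data *hw, int id)
{
	for (int v = 0; v < hw->nvps; v++)
		for (int p = 0; p < hw->vps[v].nports; p++)
			if (hw->vps[v].ports[p].loop_id == id)
				return 1;
	return 0;
}

/* Try every candidate ID once, wrapping past MAX_LOOP_ID. */
static int find_new_loop_id(const struct hw_data *hw, int start)
{
	for (int step = 0; step <= MAX_LOOP_ID; step++) {
		int id = (start + step) % (MAX_LOOP_ID + 1);
		if (!loop_id_in_use(hw, id))
			return id;	/* free to use */
	}
	return -1;			/* ID space exhausted */
}

int main(void)
{
	struct hw_data hw = {
		.vps = { { .ports = { {0}, {1} }, .nports = 2 },
			 { .ports = { {2} }, .nports = 1 } },
		.nvps = 2,
	};
	printf("first free loop id: %d\n", find_new_loop_id(&hw, 0));
	return 0;
}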
@@ -2850,7 +2886,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
  * Kernel context.
  */
 static int
-qla2x00_device_resync(scsi_qla_host_t *ha)
+qla2x00_device_resync(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint32_t mask;
@@ -2859,14 +2895,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 	uint8_t rscn_out_iter;
 	uint8_t format;
 	port_id_t d_id;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	rval = QLA_RSCNS_HANDLED;
 
-	while (ha->rscn_out_ptr != ha->rscn_in_ptr ||
-	    ha->flags.rscn_queue_overflow) {
+	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
+	    vha->flags.rscn_queue_overflow) {
 
-		rscn_entry = ha->rscn_queue[ha->rscn_out_ptr];
+		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
 		format = MSB(MSW(rscn_entry));
 		d_id.b.domain = LSB(MSW(rscn_entry));
 		d_id.b.area = MSB(LSW(rscn_entry));
@@ -2874,37 +2909,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 
 		DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
 		    "[%02x/%02x%02x%02x].\n",
-		    ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain,
+		    vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
 		    d_id.b.area, d_id.b.al_pa));
 
-		ha->rscn_out_ptr++;
-		if (ha->rscn_out_ptr == MAX_RSCN_COUNT)
-			ha->rscn_out_ptr = 0;
+		vha->rscn_out_ptr++;
+		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
+			vha->rscn_out_ptr = 0;
 
 		/* Skip duplicate entries. */
-		for (rscn_out_iter = ha->rscn_out_ptr;
-		    !ha->flags.rscn_queue_overflow &&
-		    rscn_out_iter != ha->rscn_in_ptr;
+		for (rscn_out_iter = vha->rscn_out_ptr;
+		    !vha->flags.rscn_queue_overflow &&
+		    rscn_out_iter != vha->rscn_in_ptr;
 		    rscn_out_iter = (rscn_out_iter ==
 			(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
 
-			if (rscn_entry != ha->rscn_queue[rscn_out_iter])
+			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
 				break;
 
 			DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
-			    "entry found at [%d].\n", ha->host_no,
+			    "entry found at [%d].\n", vha->host_no,
 			    rscn_out_iter));
 
-			ha->rscn_out_ptr = rscn_out_iter;
+			vha->rscn_out_ptr = rscn_out_iter;
 		}
 
 		/* Queue overflow, set switch default case. */
-		if (ha->flags.rscn_queue_overflow) {
+		if (vha->flags.rscn_queue_overflow) {
 			DEBUG(printk("scsi(%ld): device_resync: rscn "
-			    "overflow.\n", ha->host_no));
+			    "overflow.\n", vha->host_no));
 
 			format = 3;
-			ha->flags.rscn_queue_overflow = 0;
+			vha->flags.rscn_queue_overflow = 0;
 		}
 
 		switch (format) {
@@ -2920,16 +2955,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 		default:
 			mask = 0x0;
 			d_id.b24 = 0;
-			ha->rscn_out_ptr = ha->rscn_in_ptr;
+			vha->rscn_out_ptr = vha->rscn_in_ptr;
 			break;
 		}
 
 		rval = QLA_SUCCESS;
 
-		list_for_each_entry(fcport, &pha->fcports, list) {
-			if (fcport->vp_idx != ha->vp_idx)
-				continue;
-
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
 			    (fcport->d_id.b24 & mask) != d_id.b24 ||
 			    fcport->port_type == FCT_BROADCAST)
@@ -2938,7 +2970,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
 			if (atomic_read(&fcport->state) == FCS_ONLINE) {
 				if (format != 3 ||
 				    fcport->port_type != FCT_INITIATOR) {
-					qla2x00_mark_device_lost(ha, fcport,
+					qla2x00_mark_device_lost(vha, fcport,
 					    0, 0);
 				}
 			}
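
[Editor's note, not part of the patch: qla2x00_device_resync() above drains a fixed-size circular RSCN queue, collapsing consecutive duplicate entries and falling back to a full-fabric rescan on overflow. A compilable toy model of that drain loop follows; names and sizes are invented for illustration.]

/* Toy model of the RSCN circular-queue drain
 * (hypothetical types; compile with: cc -std=c99 rscn.c). */
#include <stdio.h>
#include <stdint.h>

#define MAX_RSCN_COUNT 8

struct rscn_q {
	uint32_t entries[MAX_RSCN_COUNT];
	unsigned in_ptr, out_ptr;	/* producer / consumer indices */
	int overflow;			/* entries were lost */
};

/* Pop one entry and collapse immediately following duplicates, mirroring
 * the "Skip duplicate entries" loop in the hunk above.  Returns 0 when
 * drained, 2 when overflow forces a full-fabric rescan, 1 otherwise. */
static int rscn_pop(struct rscn_q *q, uint32_t *entry)
{
	if (q->overflow) {
		q->overflow = 0;
		q->out_ptr = q->in_ptr;	/* discard stale entries */
		return 2;
	}
	if (q->out_ptr == q->in_ptr)
		return 0;

	*entry = q->entries[q->out_ptr];
	q->out_ptr = (q->out_ptr + 1) % MAX_RSCN_COUNT;

	while (q->out_ptr != q->in_ptr && q->entries[q->out_ptr] == *entry)
		q->out_ptr = (q->out_ptr + 1) % MAX_RSCN_COUNT;

	return 1;
}

int main(void)
{
	struct rscn_q q = { .entries = { 5, 5, 5, 9 }, .in_ptr = 4 };
	uint32_t e = 0;
	int rc;

	while ((rc = rscn_pop(&q, &e)) != 0) {
		if (rc == 2)
			printf("overflow: full fabric resync\n");
		else
			printf("handle RSCN %u\n", e);	/* prints 5 then 9 */
	}
	return 0;
}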
@@ -2965,30 +2997,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
  * Kernel context.
  */
 static int
-qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
     uint16_t *next_loopid)
 {
 	int rval;
 	int retry;
 	uint8_t opts;
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
 	retry = 0;
 
-	rval = qla2x00_fabric_login(ha, fcport, next_loopid);
+	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
 	if (rval == QLA_SUCCESS) {
 		/* Send an ADISC to tape devices.*/
 		opts = 0;
 		if (fcport->flags & FCF_TAPE_PRESENT)
 			opts |= BIT_1;
-		rval = qla2x00_get_port_database(ha, fcport, opts);
+		rval = qla2x00_get_port_database(vha, fcport, opts);
 		if (rval != QLA_SUCCESS) {
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
-			qla2x00_mark_device_lost(ha, fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
 		} else {
-			qla2x00_update_fcport(ha, fcport);
+			qla2x00_update_fcport(vha, fcport);
 		}
 	}
 
@@ -3010,13 +3043,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
  * 3 - Fatal error
  */
 int
-qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
     uint16_t *next_loopid)
 {
 	int rval;
 	int retry;
 	uint16_t tmp_loopid;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
 
 	retry = 0;
 	tmp_loopid = 0;
@@ -3024,11 +3058,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 	for (;;) {
 		DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
 		    "for port %02x%02x%02x.\n",
-		    ha->host_no, fcport->loop_id, fcport->d_id.b.domain,
+		    vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa));
 
 		/* Login fcport on switch. */
-		ha->isp_ops->fabric_login(ha, fcport->loop_id,
+		ha->isp_ops->fabric_login(vha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb, BIT_0);
 		if (mb[0] == MBS_PORT_ID_USED) {
@@ -3084,7 +3118,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 * Loop ID already used, try next loop ID.
 			 */
 			fcport->loop_id++;
-			rval = qla2x00_find_new_loop_id(ha, fcport);
+			rval = qla2x00_find_new_loop_id(vha, fcport);
 			if (rval != QLA_SUCCESS) {
 				/* Ran out of loop IDs to use */
 				break;
@@ -3096,10 +3130,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 * dead.
 			 */
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
-			qla2x00_mark_device_lost(ha, fcport, 1, 0);
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
 
 			rval = 1;
 			break;
@@ -3109,12 +3143,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
 			 */
 			DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
 			    "loop_id=%x jiffies=%lx.\n",
-			    __func__, ha->host_no, mb[0],
+			    __func__, vha->host_no, mb[0],
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
 
 			*next_loopid = fcport->loop_id;
-			ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
 			    fcport->d_id.b.domain, fcport->d_id.b.area,
 			    fcport->d_id.b.al_pa);
 			fcport->loop_id = FC_NO_LOOP_ID;
@@ -3142,13 +3176,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
  * 3 - Fatal error
  */
 int
-qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int rval;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 
 	memset(mb, 0, sizeof(mb));
-	rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0);
+	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
 	if (rval == QLA_SUCCESS) {
 		/* Interrogate mailbox registers for any errors */
 		if (mb[0] == MBS_COMMAND_ERROR)
@@ -3172,57 +3206,57 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
  * 0 = success
  */
 int
-qla2x00_loop_resync(scsi_qla_host_t *ha)
+qla2x00_loop_resync(scsi_qla_host_t *vha)
 {
-	int rval;
+	int rval = QLA_SUCCESS;
 	uint32_t wait_time;
-
-	rval = QLA_SUCCESS;
-
-	atomic_set(&ha->loop_state, LOOP_UPDATE);
-	clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
-	if (ha->flags.online) {
-		if (!(rval = qla2x00_fw_ready(ha))) {
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	atomic_set(&vha->loop_state, LOOP_UPDATE);
+	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+	if (vha->flags.online) {
+		if (!(rval = qla2x00_fw_ready(vha))) {
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
-				atomic_set(&ha->loop_state, LOOP_UPDATE);
+				atomic_set(&vha->loop_state, LOOP_UPDATE);
 
 				/* Issue a marker after FW becomes ready. */
-				qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-				ha->marker_needed = 0;
+				qla2x00_marker(vha, req, rsp, 0, 0,
+				    MK_SYNC_ALL);
+				vha->marker_needed = 0;
 
 				/* Remap devices on Loop. */
-				clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 
-				qla2x00_configure_loop(ha);
+				qla2x00_configure_loop(vha);
 				wait_time--;
-			} while (!atomic_read(&ha->loop_down_timer) &&
-			    !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
-			    wait_time &&
-			    (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+			} while (!atomic_read(&vha->loop_down_timer) &&
+			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+			    && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+			    &vha->dpc_flags)));
 		}
 	}
 
-	if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
+	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
 		return (QLA_FUNCTION_FAILED);
-	}
 
-	if (rval) {
+	if (rval)
 		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
-	}
 
 	return (rval);
 }
 
 void
-qla2x00_update_fcports(scsi_qla_host_t *ha)
+qla2x00_update_fcports(scsi_qla_host_t *vha)
 {
 	fc_port_t *fcport;
 
 	/* Go with deferred removal of rport references. */
-	list_for_each_entry(fcport, &ha->fcports, list)
-		if (fcport->drport &&
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		if (fcport && fcport->drport &&
 		    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
 			qla2x00_rport_del(fcport);
 }
@@ -3238,63 +3272,65 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
 * 0 = success
 */
 int
-qla2x00_abort_isp(scsi_qla_host_t *ha)
+qla2x00_abort_isp(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint8_t status = 0;
-	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+	struct req_que *req = ha->req_q_map[0];
 
-	if (ha->flags.online) {
-		ha->flags.online = 0;
-		clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+	if (vha->flags.online) {
+		vha->flags.online = 0;
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		ha->qla_stats.total_isp_aborts++;
 
 		qla_printk(KERN_INFO, ha,
 		    "Performing ISP error recovery - ha= %p.\n", ha);
-		ha->isp_ops->reset_chip(ha);
+		ha->isp_ops->reset_chip(vha);
 
-		atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
-			atomic_set(&ha->loop_state, LOOP_DOWN);
-			qla2x00_mark_all_devices_lost(ha, 0);
-			list_for_each_entry(vha, &ha->vp_list, vp_list)
-				qla2x00_mark_all_devices_lost(vha, 0);
+		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			qla2x00_mark_all_devices_lost(vha, 0);
+			list_for_each_entry(vp, &ha->vp_list, list)
+				qla2x00_mark_all_devices_lost(vp, 0);
 		} else {
-			if (!atomic_read(&ha->loop_down_timer))
-				atomic_set(&ha->loop_down_timer,
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
 				    LOOP_DOWN_TIME);
 		}
 
 		/* Requeue all commands in outstanding command list. */
-		qla2x00_abort_all_cmds(ha, DID_RESET << 16);
+		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
 
-		ha->isp_ops->get_flash_version(ha, ha->request_ring);
+		ha->isp_ops->get_flash_version(vha, req->ring);
 
-		ha->isp_ops->nvram_config(ha);
+		ha->isp_ops->nvram_config(vha);
 
-		if (!qla2x00_restart_isp(ha)) {
-			clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+		if (!qla2x00_restart_isp(vha)) {
+			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 
-			if (!atomic_read(&ha->loop_down_timer)) {
+			if (!atomic_read(&vha->loop_down_timer)) {
 				/*
 				 * Issue marker command only when we are going
 				 * to start the I/O .
 				 */
-				ha->marker_needed = 1;
+				vha->marker_needed = 1;
 			}
 
-			ha->flags.online = 1;
+			vha->flags.online = 1;
 
 			ha->isp_ops->enable_intrs(ha);
 
 			ha->isp_abort_cnt = 0;
-			clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 
 			if (ha->fce) {
 				ha->flags.fce_enabled = 1;
 				memset(ha->fce, 0,
 				    fce_calc_size(ha->fce_bufs));
-				rval = qla2x00_enable_fce_trace(ha,
+				rval = qla2x00_enable_fce_trace(vha,
 				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
 				    &ha->fce_bufs);
 				if (rval) {
@@ -3307,7 +3343,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 
 			if (ha->eft) {
 				memset(ha->eft, 0, EFT_SIZE);
-				rval = qla2x00_enable_eft_trace(ha,
+				rval = qla2x00_enable_eft_trace(vha,
 				    ha->eft_dma, EFT_NUM_BUFFERS);
 				if (rval) {
 					qla_printk(KERN_WARNING, ha,
@@ -3316,8 +3352,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 				}
 			}
 		} else {	/* failed the ISP abort */
-			ha->flags.online = 1;
-			if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
+			vha->flags.online = 1;
+			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
 				if (ha->isp_abort_cnt == 0) {
 					qla_printk(KERN_WARNING, ha,
 					    "ISP error recovery failed - "
@@ -3326,37 +3362,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 					 * The next call disables the board
 					 * completely.
 					 */
-					ha->isp_ops->reset_adapter(ha);
-					ha->flags.online = 0;
+					ha->isp_ops->reset_adapter(vha);
+					vha->flags.online = 0;
 					clear_bit(ISP_ABORT_RETRY,
-					    &ha->dpc_flags);
+					    &vha->dpc_flags);
 					status = 0;
 				} else { /* schedule another ISP abort */
 					ha->isp_abort_cnt--;
 					DEBUG(printk("qla%ld: ISP abort - "
 					    "retry remaining %d\n",
-					    ha->host_no, ha->isp_abort_cnt));
+					    vha->host_no, ha->isp_abort_cnt));
 					status = 1;
 				}
 			} else {
 				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
 				DEBUG(printk("qla2x00(%ld): ISP error recovery "
 				    "- retrying (%d) more times\n",
-				    ha->host_no, ha->isp_abort_cnt));
-				set_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+				    vha->host_no, ha->isp_abort_cnt));
+				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 				status = 1;
 			}
 		}
 
 	}
 
-	if (status) {
+	if (!status) {
+		DEBUG(printk(KERN_INFO
+				"qla2x00_abort_isp(%ld): succeeded.\n",
+				vha->host_no));
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx)
+				qla2x00_vp_abort_isp(vp);
+		}
+	} else {
 		qla_printk(KERN_INFO, ha,
 		    "qla2x00_abort_isp: **** FAILED ****\n");
-	} else {
-		DEBUG(printk(KERN_INFO
-				"qla2x00_abort_isp(%ld): exiting.\n",
-				ha->host_no));
 	}
 
 	return(status);
@@ -3373,42 +3413,50 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
 * 0 = success
 */
 static int
-qla2x00_restart_isp(scsi_qla_host_t *ha)
+qla2x00_restart_isp(scsi_qla_host_t *vha)
 {
 	uint8_t status = 0;
 	uint32_t wait_time;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	/* If firmware needs to be loaded */
-	if (qla2x00_isp_firmware(ha)) {
-		ha->flags.online = 0;
-		if (!(status = ha->isp_ops->chip_diag(ha)))
-			status = qla2x00_setup_chip(ha);
+	if (qla2x00_isp_firmware(vha)) {
+		vha->flags.online = 0;
+		status = ha->isp_ops->chip_diag(vha);
+		if (!status)
+			status = qla2x00_setup_chip(vha);
 	}
 
-	if (!status && !(status = qla2x00_init_rings(ha))) {
-		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		if (!(status = qla2x00_fw_ready(ha))) {
+	if (!status && !(status = qla2x00_init_rings(vha))) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		/* Initialize the queues in use */
+		qla25xx_init_queues(ha);
+
+		status = qla2x00_fw_ready(vha);
+		if (!status) {
 			DEBUG(printk("%s(): Start configure loop, "
 			    "status = %d\n", __func__, status));
 
 			/* Issue a marker after FW becomes ready. */
-			qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
-			ha->flags.online = 1;
+			vha->flags.online = 1;
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
-				clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-				qla2x00_configure_loop(ha);
+				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				qla2x00_configure_loop(vha);
 				wait_time--;
-			} while (!atomic_read(&ha->loop_down_timer) &&
-			    !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
-			    wait_time &&
-			    (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+			} while (!atomic_read(&vha->loop_down_timer) &&
+			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+			    && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+			    &vha->dpc_flags)));
 		}
 
 		/* if no cable then assume it's good */
-		if ((ha->device_flags & DFLG_NO_CABLE))
+		if ((vha->device_flags & DFLG_NO_CABLE))
 			status = 0;
 
 		DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3418,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 	return (status);
 }
 
+static int
+qla25xx_init_queues(struct qla_hw_data *ha)
+{
+	struct rsp_que *rsp = NULL;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	int ret = -1;
+	int i;
+
+	for (i = 1; i < ha->max_queues; i++) {
+		rsp = ha->rsp_q_map[i];
+		if (rsp) {
+			rsp->options &= ~BIT_0;
+			ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options);
+			if (ret != QLA_SUCCESS)
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Rsp que:%d init failed\n", __func__,
+					rsp->id));
+			else
+				DEBUG2_17(printk(KERN_INFO
+					"%s Rsp que:%d inited\n", __func__,
+					rsp->id));
+		}
+		req = ha->req_q_map[i];
+		if (req) {
+			req->options &= ~BIT_0;
+			ret = qla25xx_init_req_que(base_vha, req, req->options);
+			if (ret != QLA_SUCCESS)
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Req que:%d init failed\n", __func__,
+					req->id));
+			else
+				DEBUG2_17(printk(KERN_WARNING
+					"%s Rsp que:%d inited\n", __func__,
+					req->id));
+		}
+	}
+	return ret;
+}
+
 /*
 * qla2x00_reset_adapter
 *	Reset adapter.
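
[Editor's note, not part of the patch: qla25xx_init_queues() above walks the shared hw structure's req_q_map[]/rsp_q_map[] arrays and re-initializes every mapped queue past the base queue at index 0. A self-contained C sketch of that multi-queue walk follows; all types and names here are simplified stand-ins, not the driver's definitions.]

/* Toy model of per-index queue-map re-initialization
 * (hypothetical types; compile with: cc -std=c99 queues.c). */
#include <stdio.h>

#define MAX_QUEUES 4
#define BIT_0      0x1

struct req_que { int id; unsigned options; };
struct rsp_que { int id; unsigned options; };

struct hw_data {
	struct req_que *req_q_map[MAX_QUEUES];
	struct rsp_que *rsp_q_map[MAX_QUEUES];
	int max_queues;
};

/* Re-initialize every mapped queue past index 0; index 0 is the base
 * queue and is brought up by the normal init path. */
static void init_queues(struct hw_data *ha)
{
	for (int i = 1; i < ha->max_queues; i++) {
		struct rsp_que *rsp = ha->rsp_q_map[i];
		struct req_que *req = ha->req_q_map[i];

		if (rsp) {
			rsp->options &= ~BIT_0; /* clear BIT_0, as the hunk does */
			printf("rsp que %d inited\n", rsp->id);
		}
		if (req) {
			req->options &= ~BIT_0;
			printf("req que %d inited\n", req->id);
		}
	}
}

int main(void)
{
	struct req_que r1 = { .id = 1, .options = BIT_0 };
	struct rsp_que s1 = { .id = 1, .options = BIT_0 };
	struct hw_data ha = { .max_queues = 2 };

	ha.req_q_map[1] = &r1;	/* only index 1 is mapped in this toy */
	ha.rsp_q_map[1] = &s1;
	init_queues(&ha);
	return 0;
}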
@@ -3426,12 +3514,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
 * ha = adapter block pointer.
 */
 void
-qla2x00_reset_adapter(scsi_qla_host_t *ha)
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
-	ha->flags.online = 0;
+	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3443,12 +3532,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
 }
 
 void
-qla24xx_reset_adapter(scsi_qla_host_t *ha)
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	ha->flags.online = 0;
+	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3462,9 +3552,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
 /* On sparc systems, obtain port and node WWN from firmware
  * properties.
  */
-static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+	struct nvram_24xx *nv)
 {
 #ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
 	struct pci_dev *pdev = ha->pdev;
 	struct device_node *dp = pci_device_to_OF_node(pdev);
 	const u8 *val;
@@ -3481,7 +3573,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
 }
 
 int
-qla24xx_nvram_config(scsi_qla_host_t *ha)
+qla24xx_nvram_config(scsi_qla_host_t *vha)
 {
 	int rval;
 	struct init_cb_24xx *icb;
@@ -3490,6 +3582,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	uint8_t *dptr1, *dptr2;
 	uint32_t chksum;
 	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
 	icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3507,12 +3600,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
-	ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd,
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
 	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
 
 	/* Get NVRAM data into cache and calculate checksum. */
 	dptr = (uint32_t *)nv;
-	ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
+	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
 	    ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
 		chksum += le32_to_cpu(*dptr++);
@@ -3557,7 +3650,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		nv->node_name[5] = 0x1c;
 		nv->node_name[6] = 0x55;
 		nv->node_name[7] = 0x86;
-		qla24xx_nvram_wwn_from_ofw(ha, nv);
+		qla24xx_nvram_wwn_from_ofw(vha, nv);
 		nv->login_retry_count = __constant_cpu_to_le16(8);
 		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
 		nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3577,7 +3670,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	}
 
 	/* Reset Initialization control block */
-	memset(icb, 0, sizeof(struct init_cb_24xx));
+	memset(icb, 0, ha->init_cb_size);
 
 	/* Copy 1st segment. */
 	dptr1 = (uint8_t *)icb;
@@ -3600,7 +3693,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	/*
 	 * Setup driver NVRAM options.
 	 */
-	qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name),
+	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
 	    "QLA2462");
 
 	/* Use alternate WWN? */
@@ -3639,8 +3732,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	ha->serial0 = icb->port_name[5];
 	ha->serial1 = icb->port_name[6];
 	ha->serial2 = icb->port_name[7];
-	ha->node_name = icb->node_name;
-	ha->port_name = icb->port_name;
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
 	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 
@@ -3695,7 +3788,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	ha->login_retry_count = ql2xloginretrycount;
 
 	/* Enable ZIO. */
-	if (!ha->flags.init_done) {
+	if (!vha->flags.init_done) {
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3703,12 +3796,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	}
 	icb->firmware_options_2 &= __constant_cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
-	ha->flags.process_response_queue = 0;
+	vha->flags.process_response_queue = 0;
 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
 		ha->zio_mode = QLA_ZIO_MODE_6;
 
 		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
-		    "(%d us).\n", ha->host_no, ha->zio_mode,
+		    "(%d us).\n", vha->host_no, ha->zio_mode,
 		    ha->zio_timer * 100));
 		qla_printk(KERN_INFO, ha,
 		    "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3717,36 +3810,37 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 		icb->firmware_options_2 |= cpu_to_le32(
 		    (uint32_t)ha->zio_mode);
 		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
-		ha->flags.process_response_queue = 1;
+		vha->flags.process_response_queue = 1;
 	}
 
 	if (rval) {
 		DEBUG2_3(printk(KERN_WARNING
-		    "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
+		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
 	}
 	return (rval);
 }
 
 static int
-qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
-	int rval;
+	int rval = QLA_SUCCESS;
 	int segments, fragment;
 	uint32_t faddr;
 	uint32_t *dcode, dlen;
 	uint32_t risc_addr;
 	uint32_t risc_size;
 	uint32_t i;
-
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 	rval = QLA_SUCCESS;
 
 	segments = FA_RISC_CODE_SEGMENTS;
 	faddr = ha->flt_region_fw;
-	dcode = (uint32_t *)ha->request_ring;
+	dcode = (uint32_t *)req->ring;
 	*srisc_addr = 0;
 
 	/* Validate firmware image by checking version. */
-	qla24xx_read_flash_data(ha, dcode, faddr + 4, 4);
+	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
 	for (i = 0; i < 4; i++)
 		dcode[i] = be32_to_cpu(dcode[i]);
 	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3764,7 +3858,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 	while (segments && rval == QLA_SUCCESS) {
 		/* Read segment's load information. */
-		qla24xx_read_flash_data(ha, dcode, faddr, 4);
+		qla24xx_read_flash_data(vha, dcode, faddr, 4);
 
 		risc_addr = be32_to_cpu(dcode[2]);
 		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3778,17 +3872,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 			DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
 			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
-			    ha->host_no, risc_addr, dlen, faddr));
+			    vha->host_no, risc_addr, dlen, faddr));
 
-			qla24xx_read_flash_data(ha, dcode, faddr, dlen);
+			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(dcode[i]);
 
-			rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 			    dlen);
 			if (rval) {
 				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-				    "segment %d of firmware\n", ha->host_no,
+				    "segment %d of firmware\n", vha->host_no,
 				    fragment));
 				qla_printk(KERN_WARNING, ha,
 				    "[ERROR] Failed to load segment %d of "
@@ -3812,16 +3906,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 #define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
 
 int
-qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int rval;
 	int i, fragment;
 	uint16_t *wcode, *fwcode;
 	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
 	struct fw_blob *blob;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Load firmware blob. */
-	blob = qla2x00_request_firmware(ha);
+	blob = qla2x00_request_firmware(vha);
 	if (!blob) {
 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3831,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 
 	rval = QLA_SUCCESS;
 
-	wcode = (uint16_t *)ha->request_ring;
+	wcode = (uint16_t *)req->ring;
 	*srisc_addr = 0;
 	fwcode = (uint16_t *)blob->fw->data;
 	fwclen = 0;
@@ -3878,17 +3974,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 		wlen = risc_size;
 
 		DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
-		    "addr %x, number of words 0x%x.\n", ha->host_no,
+		    "addr %x, number of words 0x%x.\n", vha->host_no,
 		    risc_addr, wlen));
 
 		for (i = 0; i < wlen; i++)
 			wcode[i] = swab16(fwcode[i]);
 
-		rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+		rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 		    wlen);
 		if (rval) {
 			DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-			    "segment %d of firmware\n", ha->host_no,
+			    "segment %d of firmware\n", vha->host_no,
 			    fragment));
 			qla_printk(KERN_WARNING, ha,
 			    "[ERROR] Failed to load segment %d of "
@@ -3912,7 +4008,7 @@ fail_fw_integrity:
 }
 
 int
-qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int rval;
 	int segments, fragment;
@@ -3922,9 +4018,11 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 	uint32_t i;
 	struct fw_blob *blob;
 	uint32_t *fwcode, fwclen;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
 
 	/* Load firmware blob. */
-	blob = qla2x00_request_firmware(ha);
+	blob = qla2x00_request_firmware(vha);
 	if (!blob) {
 		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
 		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3933,13 +4031,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 		/* Try to load RISC code from flash. */
 		qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
 		    "outdated) firmware from flash.\n");
-		return qla24xx_load_risc_flash(ha, srisc_addr);
+		return qla24xx_load_risc_flash(vha, srisc_addr);
 	}
 
 	rval = QLA_SUCCESS;
 
 	segments = FA_RISC_CODE_SEGMENTS;
-	dcode = (uint32_t *)ha->request_ring;
+	dcode = (uint32_t *)req->ring;
 	*srisc_addr = 0;
 	fwcode = (uint32_t *)blob->fw->data;
 	fwclen = 0;
@@ -3987,17 +4085,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
 		dlen = risc_size;
 
 		DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
-		    "addr %x, number of dwords 0x%x.\n", ha->host_no,
+		    "addr %x, number of dwords 0x%x.\n", vha->host_no,
 		    risc_addr, dlen));
 
 		for (i = 0; i < dlen; i++)
 			dcode[i] = swab32(fwcode[i]);
 
-		rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+		rval = qla2x00_load_ram(vha, req->dma, risc_addr,
 		    dlen);
 		if (rval) {
 			DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
-			    "segment %d of firmware\n", ha->host_no,
+			    "segment %d of firmware\n", vha->host_no,
 			    fragment));
 			qla_printk(KERN_WARNING, ha,
 			    "[ERROR] Failed to load segment %d of "
@@ -4021,49 +4119,53 @@ fail_fw_integrity:
 }
 
 void
-qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 {
 	int ret, retries;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_FWI2_CAPABLE(ha))
 		return;
 	if (!ha->fw_major_version)
 		return;
 
-	ret = qla2x00_stop_firmware(ha);
+	ret = qla2x00_stop_firmware(vha);
 	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
 	    retries ; retries--) {
-		ha->isp_ops->reset_chip(ha);
-		if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
+		ha->isp_ops->reset_chip(vha);
+		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
 			continue;
-		if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
+		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
 			continue;
 		qla_printk(KERN_INFO, ha,
 		    "Attempting retry of stop-firmware command...\n");
-		ret = qla2x00_stop_firmware(ha);
+		ret = qla2x00_stop_firmware(vha);
 	}
 }
 
 int
-qla24xx_configure_vhba(scsi_qla_host_t *ha)
+qla24xx_configure_vhba(scsi_qla_host_t *vha)
 {
 	int rval = QLA_SUCCESS;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-	if (!ha->parent)
+	if (!vha->vp_idx)
 		return -EINVAL;
 
-	rval = qla2x00_fw_ready(ha->parent);
+	rval = qla2x00_fw_ready(base_vha);
 	if (rval == QLA_SUCCESS) {
-		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 	}
 
-	ha->flags.management_server_logged_in = 0;
+	vha->flags.management_server_logged_in = 0;
 
 	/* Login to SNS first */
-	qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc,
-	    mb, BIT_1);
+	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
 	if (mb[0] != MBS_COMMAND_COMPLETE) {
 		DEBUG15(qla_printk(KERN_INFO, ha,
 		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4072,11 +4174,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
 		return (QLA_FUNCTION_FAILED);
 	}
 
-	atomic_set(&ha->loop_down_timer, 0);
-	atomic_set(&ha->loop_state, LOOP_UP);
-	set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
-	set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
-	rval = qla2x00_loop_resync(ha->parent);
+	atomic_set(&vha->loop_down_timer, 0);
+	atomic_set(&vha->loop_state, LOOP_UP);
+	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	rval = qla2x00_loop_resync(base_vha);
 
 	return rval;
 }
@@ -4087,9 +4189,10 @@ static LIST_HEAD(qla_cs84xx_list);
 static DEFINE_MUTEX(qla_cs84xx_mutex);
 
 static struct qla_chip_state_84xx *
-qla84xx_get_chip(struct scsi_qla_host *ha)
+qla84xx_get_chip(struct scsi_qla_host *vha)
 {
 	struct qla_chip_state_84xx *cs84xx;
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&qla_cs84xx_mutex);
 
@@ -4129,21 +4232,23 @@ __qla84xx_chip_release(struct kref *kref)
 }
 
 void
-qla84xx_put_chip(struct scsi_qla_host *ha)
+qla84xx_put_chip(struct scsi_qla_host *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (ha->cs84xx)
 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
 }
 
 static int
-qla84xx_init_chip(scsi_qla_host_t *ha)
+qla84xx_init_chip(scsi_qla_host_t *vha)
 {
 	int rval;
 	uint16_t status[2];
+	struct qla_hw_data *ha = vha->hw;
 
 	mutex_lock(&ha->cs84xx->fw_update_mutex);
 
-	rval = qla84xx_verify_chip(ha, status);
+	rval = qla84xx_verify_chip(vha, status);
 
 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..5e0a7095c9f2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,47 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
 }
 
 static inline void
-qla2x00_poll(scsi_qla_host_t *ha)
+qla2x00_poll(struct rsp_que *rsp)
 {
 	unsigned long flags;
-
+	struct qla_hw_data *ha = rsp->hw;
 	local_irq_save(flags);
-	ha->isp_ops->intr_handler(0, ha);
+	ha->isp_ops->intr_handler(0, rsp);
 	local_irq_restore(flags);
 }
 
-static __inline__ scsi_qla_host_t *
-to_qla_parent(scsi_qla_host_t *ha)
-{
-	return ha->parent ? ha->parent : ha;
-}
-
-/**
- * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
- * @ha: HA context
- * @ha_locked: is function called with the hardware lock
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-static inline int
-qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
-{
-	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (ha_locked) {
-			if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		} else {
-			if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		}
-		ha->marker_needed = 0;
-	}
-	return (QLA_SUCCESS);
-}
-
 static inline uint8_t *
 host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 {
@@ -87,11 +55,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 }
 
 static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
 {
+	struct qla_hw_data *ha = vha->hw;
 	if (IS_FWI2_CAPABLE(ha))
 		return (loop_id > NPH_LAST_HANDLE);
 
-	return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
 	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-};
+}
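
[Editor's note, not part of the patch: the common thread across all of these hunks is the split of the old monolithic scsi_qla_host_t into a per-(virtual-)port scsi_qla_host and a shared qla_hw_data reached through vha->hw. A minimal compilable C sketch of that container pattern follows; the types here are simplified stand-ins, not the driver's real structures.]

/* Toy model of the vha/hw split (compile with: cc -std=c99 split.c). */
#include <stdio.h>

/* Shared, physical-adapter-wide state (stand-in for qla_hw_data). */
struct hw_data {
	int max_loop_id;
	int isp_abort_cnt;
};

/* Per-port state (stand-in for scsi_qla_host); every vport gets one,
 * and all of them point at the same hw_data. */
struct qla_host {
	struct hw_data *hw;
	int vp_idx;
	long host_no;
};

/* Functions now take the vha and derive the shared hw at the top,
 * the exact shape this patch gives the qla2x00_* routines. */
static void report(struct qla_host *vha)
{
	struct hw_data *ha = vha->hw;

	printf("scsi(%ld): vp=%d max_loop_id=%d\n",
	    vha->host_no, vha->vp_idx, ha->max_loop_id);
}

int main(void)
{
	struct hw_data hw = { .max_loop_id = 125 };
	struct qla_host base  = { .hw = &hw, .vp_idx = 0, .host_no = 0 };
	struct qla_host vport = { .hw = &hw, .vp_idx = 1, .host_no = 1 };

	report(&base);	/* both hosts see the same shared hardware state */
	report(&vport);
	return 0;
}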
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 14static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15static void qla2x00_isp_cmd(scsi_qla_host_t *ha); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
16 17
17/** 18/**
18 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 19 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
30 /* Set transfer direction */ 31 /* Set transfer direction */
31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 33 cflags = CF_WRITE;
33 sp->fcport->ha->qla_stats.output_bytes += 34 sp->fcport->vha->hw->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd); 35 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 36 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ; 37 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes += 38 sp->fcport->vha->hw->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd); 39 scsi_bufflen(sp->cmd);
39 } 40 }
40 return (cflags); 41 return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
91 * Returns a pointer to the Continuation Type 0 IOCB packet. 92 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */ 93 */
93static inline cont_entry_t * 94static inline cont_entry_t *
94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha) 95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
95{ 96{
96 cont_entry_t *cont_pkt; 97 cont_entry_t *cont_pkt;
97
98 /* Adjust ring index. */ 98 /* Adjust ring index. */
99 ha->req_ring_index++; 99 req->ring_index++;
100 if (ha->req_ring_index == ha->request_q_length) { 100 if (req->ring_index == req->length) {
101 ha->req_ring_index = 0; 101 req->ring_index = 0;
102 ha->request_ring_ptr = ha->request_ring; 102 req->ring_ptr = req->ring;
103 } else { 103 } else {
104 ha->request_ring_ptr++; 104 req->ring_ptr++;
105 } 105 }
106 106
107 cont_pkt = (cont_entry_t *)ha->request_ring_ptr; 107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108 108
109 /* Load packet defaults. */ 109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = 110 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 121 */
122static inline cont_a64_entry_t * 122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha) 123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
124{ 124{
125 cont_a64_entry_t *cont_pkt; 125 cont_a64_entry_t *cont_pkt;
126 126
127 /* Adjust ring index. */ 127 /* Adjust ring index. */
128 ha->req_ring_index++; 128 req->ring_index++;
129 if (ha->req_ring_index == ha->request_q_length) { 129 if (req->ring_index == req->length) {
130 ha->req_ring_index = 0; 130 req->ring_index = 0;
131 ha->request_ring_ptr = ha->request_ring; 131 req->ring_ptr = req->ring;
132 } else { 132 } else {
133 ha->request_ring_ptr++; 133 req->ring_ptr++;
134 } 134 }
135 135
136 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr; 136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 137
138 /* Load packet defaults. */ 138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = 139 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155{ 155{
156 uint16_t avail_dsds; 156 uint16_t avail_dsds;
157 uint32_t *cur_dsd; 157 uint32_t *cur_dsd;
158 scsi_qla_host_t *ha; 158 scsi_qla_host_t *vha;
159 struct scsi_cmnd *cmd; 159 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 160 struct scatterlist *sg;
161 int i; 161 int i;
162 struct req_que *req;
162 163
163 cmd = sp->cmd; 164 cmd = sp->cmd;
164 165
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
172 return; 173 return;
173 } 174 }
174 175
175 ha = sp->ha; 176 vha = sp->vha;
177 req = sp->que;
176 178
177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
178 180
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
190 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
191 * Type 0 IOCB. 193 * Type 0 IOCB.
192 */ 194 */
193 cont_pkt = qla2x00_prep_cont_type0_iocb(ha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
194 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
195 avail_dsds = 7; 197 avail_dsds = 7;
196 } 198 }
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
214{ 216{
215 uint16_t avail_dsds; 217 uint16_t avail_dsds;
216 uint32_t *cur_dsd; 218 uint32_t *cur_dsd;
217 scsi_qla_host_t *ha; 219 scsi_qla_host_t *vha;
218 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
219 struct scatterlist *sg; 221 struct scatterlist *sg;
220 int i; 222 int i;
223 struct req_que *req;
221 224
222 cmd = sp->cmd; 225 cmd = sp->cmd;
223 226
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
231 return; 234 return;
232 } 235 }
233 236
234 ha = sp->ha; 237 vha = sp->vha;
238 req = sp->que;
235 239
236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
237 241
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 * Five DSDs are available in the Continuation 254 * Five DSDs are available in the Continuation
251 * Type 1 IOCB. 255 * Type 1 IOCB.
252 */ 256 */
253 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
254 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
255 avail_dsds = 5; 259 avail_dsds = 5;
256 } 260 }
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
274{ 278{
275 int ret, nseg; 279 int ret, nseg;
276 unsigned long flags; 280 unsigned long flags;
277 scsi_qla_host_t *ha; 281 scsi_qla_host_t *vha;
278 struct scsi_cmnd *cmd; 282 struct scsi_cmnd *cmd;
279 uint32_t *clr_ptr; 283 uint32_t *clr_ptr;
280 uint32_t index; 284 uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
284 uint16_t req_cnt; 288 uint16_t req_cnt;
285 uint16_t tot_dsds; 289 uint16_t tot_dsds;
286 struct device_reg_2xxx __iomem *reg; 290 struct device_reg_2xxx __iomem *reg;
291 struct qla_hw_data *ha;
292 struct req_que *req;
293 struct rsp_que *rsp;
287 294
288 /* Setup device pointers. */ 295 /* Setup device pointers. */
289 ret = 0; 296 ret = 0;
290 ha = sp->ha; 297 vha = sp->vha;
298 ha = vha->hw;
291 reg = &ha->iobase->isp; 299 reg = &ha->iobase->isp;
292 cmd = sp->cmd; 300 cmd = sp->cmd;
301 req = ha->req_q_map[0];
302 rsp = ha->rsp_q_map[0];
293 /* So we know we haven't pci_map'ed anything yet */ 303 /* So we know we haven't pci_map'ed anything yet */
294 tot_dsds = 0; 304 tot_dsds = 0;
295 305
296 /* Send marker if required */ 306 /* Send marker if required */
297 if (ha->marker_needed != 0) { 307 if (vha->marker_needed != 0) {
298 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 308 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
309 != QLA_SUCCESS)
299 return (QLA_FUNCTION_FAILED); 310 return (QLA_FUNCTION_FAILED);
300 } 311 vha->marker_needed = 0;
301 ha->marker_needed = 0;
302 } 312 }
303 313
304 /* Acquire ring specific lock */ 314 /* Acquire ring specific lock */
305 spin_lock_irqsave(&ha->hardware_lock, flags); 315 spin_lock_irqsave(&ha->hardware_lock, flags);
306 316
307 /* Check for room in outstanding command list. */ 317 /* Check for room in outstanding command list. */
308 handle = ha->current_outstanding_cmd; 318 handle = req->current_outstanding_cmd;
309 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 319 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
310 handle++; 320 handle++;
311 if (handle == MAX_OUTSTANDING_COMMANDS) 321 if (handle == MAX_OUTSTANDING_COMMANDS)
312 handle = 1; 322 handle = 1;
313 if (!ha->outstanding_cmds[handle]) 323 if (!req->outstanding_cmds[handle])
314 break; 324 break;
315 } 325 }
316 if (index == MAX_OUTSTANDING_COMMANDS) 326 if (index == MAX_OUTSTANDING_COMMANDS)
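The loop above scans the now per-queue outstanding_cmds[] table for a free slot, starting one past the last handle issued and treating the table as circular; handle 0 is reserved. A hedged standalone sketch of that scan, with illustrative names:

#define MAX_HANDLES 8		/* stand-in for MAX_OUTSTANDING_COMMANDS */

struct srb;			/* opaque per-command context */

struct handle_table {
	unsigned int current;		/* last handle issued */
	struct srb *cmds[MAX_HANDLES];
};

/* Return a free handle in 1..MAX_HANDLES-1, or 0 if the table is full
 * (the driver branches to queuing_error in that case).  On success the
 * caller records it, as the diff does via req->current_outstanding_cmd. */
static unsigned int find_free_handle(struct handle_table *t)
{
	unsigned int handle = t->current;

	for (unsigned int index = 1; index < MAX_HANDLES; index++) {
		handle++;
		if (handle == MAX_HANDLES)
			handle = 1;	/* handle 0 stays reserved */
		if (!t->cmds[handle])
			return handle;
	}
	return 0;
}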
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
329 339
330 /* Calculate the number of request entries needed. */ 340 /* Calculate the number of request entries needed. */
331 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 341 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
332 if (ha->req_q_cnt < (req_cnt + 2)) { 342 if (req->cnt < (req_cnt + 2)) {
333 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 343 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
334 if (ha->req_ring_index < cnt) 344 if (req->ring_index < cnt)
335 ha->req_q_cnt = cnt - ha->req_ring_index; 345 req->cnt = cnt - req->ring_index;
336 else 346 else
337 ha->req_q_cnt = ha->request_q_length - 347 req->cnt = req->length -
338 (ha->req_ring_index - cnt); 348 (req->ring_index - cnt);
339 } 349 }
340 if (ha->req_q_cnt < (req_cnt + 2)) 350 if (req->cnt < (req_cnt + 2))
341 goto queuing_error; 351 goto queuing_error;
342 352
343 /* Build command packet */ 353 /* Build command packet */
344 ha->current_outstanding_cmd = handle; 354 req->current_outstanding_cmd = handle;
345 ha->outstanding_cmds[handle] = sp; 355 req->outstanding_cmds[handle] = sp;
346 sp->ha = ha; 356 sp->vha = vha;
357 sp->que = req;
347 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 358 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
348 ha->req_q_cnt -= req_cnt; 359 req->cnt -= req_cnt;
349 360
350 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr; 361 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
351 cmd_pkt->handle = handle; 362 cmd_pkt->handle = handle;
352 /* Zero out remaining portion of packet. */ 363 /* Zero out remaining portion of packet. */
353 clr_ptr = (uint32_t *)cmd_pkt + 2; 364 clr_ptr = (uint32_t *)cmd_pkt + 2;
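The req->cnt recalculation a few lines up derives the number of free ring entries from the firmware's consumer index (read from the request-queue out register) and the driver's producer index; the caller then still insists on req_cnt + 2 free entries so a full ring and an empty ring stay distinguishable. A sketch of just that arithmetic, under assumed names:

#include <stdint.h>

/* cnt is the firmware's consumer index from the out register;
 * ring_index is the driver's producer index. */
static uint16_t free_entries(uint16_t ring_index, uint16_t length,
			     uint16_t cnt)
{
	if (ring_index < cnt)
		return cnt - ring_index;	/* consumer is ahead */
	return length - (ring_index - cnt);	/* wrapped case */
}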
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
373 wmb(); 384 wmb();
374 385
375 /* Adjust ring index. */ 386 /* Adjust ring index. */
376 ha->req_ring_index++; 387 req->ring_index++;
377 if (ha->req_ring_index == ha->request_q_length) { 388 if (req->ring_index == req->length) {
378 ha->req_ring_index = 0; 389 req->ring_index = 0;
379 ha->request_ring_ptr = ha->request_ring; 390 req->ring_ptr = req->ring;
380 } else 391 } else
381 ha->request_ring_ptr++; 392 req->ring_ptr++;
382 393
383 sp->flags |= SRB_DMA_VALID; 394 sp->flags |= SRB_DMA_VALID;
384 395
385 /* Set chip new ring index. */ 396 /* Set chip new ring index. */
386 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index); 397 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
387 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 398 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
388 399
389 /* Manage unprocessed RIO/ZIO commands in response queue. */ 400 /* Manage unprocessed RIO/ZIO commands in response queue. */
390 if (ha->flags.process_response_queue && 401 if (vha->flags.process_response_queue &&
391 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 402 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
392 qla2x00_process_response_queue(ha); 403 qla2x00_process_response_queue(rsp);
393 404
394 spin_unlock_irqrestore(&ha->hardware_lock, flags); 405 spin_unlock_irqrestore(&ha->hardware_lock, flags);
395 return (QLA_SUCCESS); 406 return (QLA_SUCCESS);
@@ -415,18 +426,20 @@ queuing_error:
415 * Returns non-zero if a failure occurred, else zero. 426 * Returns non-zero if a failure occurred, else zero.
416 */ 427 */
417int 428int
418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 429__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
419 uint8_t type) 430 struct rsp_que *rsp, uint16_t loop_id,
431 uint16_t lun, uint8_t type)
420{ 432{
421 mrk_entry_t *mrk; 433 mrk_entry_t *mrk;
422 struct mrk_entry_24xx *mrk24; 434 struct mrk_entry_24xx *mrk24;
423 scsi_qla_host_t *pha = to_qla_parent(ha); 435 struct qla_hw_data *ha = vha->hw;
436 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
424 437
425 mrk24 = NULL; 438 mrk24 = NULL;
426 mrk = (mrk_entry_t *)qla2x00_req_pkt(pha); 439 mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
427 if (mrk == NULL) { 440 if (mrk == NULL) {
428 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 441 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
429 __func__, ha->host_no)); 442 __func__, base_vha->host_no));
430 443
431 return (QLA_FUNCTION_FAILED); 444 return (QLA_FUNCTION_FAILED);
432 } 445 }
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
440 mrk24->lun[1] = LSB(lun); 453 mrk24->lun[1] = LSB(lun);
441 mrk24->lun[2] = MSB(lun); 454 mrk24->lun[2] = MSB(lun);
442 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 455 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
443 mrk24->vp_index = ha->vp_idx; 456 mrk24->vp_index = vha->vp_idx;
444 } else { 457 } else {
445 SET_TARGET_ID(ha, mrk->target, loop_id); 458 SET_TARGET_ID(ha, mrk->target, loop_id);
446 mrk->lun = cpu_to_le16(lun); 459 mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
448 } 461 }
449 wmb(); 462 wmb();
450 463
451 qla2x00_isp_cmd(pha); 464 qla2x00_isp_cmd(vha, req);
452 465
453 return (QLA_SUCCESS); 466 return (QLA_SUCCESS);
454} 467}
455 468
456int 469int
457qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 470qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
458 uint8_t type) 471 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
472 uint8_t type)
459{ 473{
460 int ret; 474 int ret;
461 unsigned long flags = 0; 475 unsigned long flags = 0;
462 scsi_qla_host_t *pha = to_qla_parent(ha);
463 476
464 spin_lock_irqsave(&pha->hardware_lock, flags); 477 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
465 ret = __qla2x00_marker(ha, loop_id, lun, type); 478 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
466 spin_unlock_irqrestore(&pha->hardware_lock, flags); 479 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
467 480
468 return (ret); 481 return (ret);
469} 482}
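qla2x00_marker() is now a thin locked wrapper around __qla2x00_marker(), taking the hardware lock from vha->hw instead of from a parent host. A runnable user-space sketch of the same two-level pattern, with pthread_mutex standing in for spin_lock_irqsave() purely for illustration:

#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked variant: caller must hold hw_lock (the __ prefix convention). */
static int __send_marker(void)
{
	/* build the marker IOCB and ring the request doorbell here */
	return 0;
}

/* Public entry point: takes the lock, delegates, releases. */
static int send_marker(void)
{
	int ret;

	pthread_mutex_lock(&hw_lock);
	ret = __send_marker();
	pthread_mutex_unlock(&hw_lock);
	return ret;
}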
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
477 * Returns NULL if function failed, else, a pointer to the request packet. 490 * Returns NULL if function failed, else, a pointer to the request packet.
478 */ 491 */
479static request_t * 492static request_t *
480qla2x00_req_pkt(scsi_qla_host_t *ha) 493qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
494 struct rsp_que *rsp)
481{ 495{
482 device_reg_t __iomem *reg = ha->iobase; 496 struct qla_hw_data *ha = vha->hw;
497 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
483 request_t *pkt = NULL; 498 request_t *pkt = NULL;
484 uint16_t cnt; 499 uint16_t cnt;
485 uint32_t *dword_ptr; 500 uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
488 503
489 /* Wait 1 second for slot. */ 504 /* Wait 1 second for slot. */
490 for (timer = HZ; timer; timer--) { 505 for (timer = HZ; timer; timer--) {
491 if ((req_cnt + 2) >= ha->req_q_cnt) { 506 if ((req_cnt + 2) >= req->cnt) {
492 /* Calculate number of free request entries. */ 507 /* Calculate number of free request entries. */
493 if (IS_FWI2_CAPABLE(ha)) 508 if (ha->mqenable)
494 cnt = (uint16_t)RD_REG_DWORD( 509 cnt = (uint16_t)
495 &reg->isp24.req_q_out); 510 RD_REG_DWORD(&reg->isp25mq.req_q_out);
496 else 511 else {
497 cnt = qla2x00_debounce_register( 512 if (IS_FWI2_CAPABLE(ha))
498 ISP_REQ_Q_OUT(ha, &reg->isp)); 513 cnt = (uint16_t)RD_REG_DWORD(
499 if (ha->req_ring_index < cnt) 514 &reg->isp24.req_q_out);
500 ha->req_q_cnt = cnt - ha->req_ring_index; 515 else
516 cnt = qla2x00_debounce_register(
517 ISP_REQ_Q_OUT(ha, &reg->isp));
518 }
519 if (req->ring_index < cnt)
520 req->cnt = cnt - req->ring_index;
501 else 521 else
502 ha->req_q_cnt = ha->request_q_length - 522 req->cnt = req->length -
503 (ha->req_ring_index - cnt); 523 (req->ring_index - cnt);
504 } 524 }
505 /* If room for request in request ring. */ 525 /* If room for request in request ring. */
506 if ((req_cnt + 2) < ha->req_q_cnt) { 526 if ((req_cnt + 2) < req->cnt) {
507 ha->req_q_cnt--; 527 req->cnt--;
508 pkt = ha->request_ring_ptr; 528 pkt = req->ring_ptr;
509 529
510 /* Zero out packet. */ 530 /* Zero out packet. */
511 dword_ptr = (uint32_t *)pkt; 531 dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
513 *dword_ptr++ = 0; 533 *dword_ptr++ = 0;
514 534
515 /* Set system defined field. */ 535 /* Set system defined field. */
516 pkt->sys_define = (uint8_t)ha->req_ring_index; 536 pkt->sys_define = (uint8_t)req->ring_index;
517 537
518 /* Set entry count. */ 538 /* Set entry count. */
519 pkt->entry_count = 1; 539 pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
522 } 542 }
523 543
524 /* Release ring specific lock */ 544 /* Release ring specific lock */
525 spin_unlock(&ha->hardware_lock); 545 spin_unlock_irq(&ha->hardware_lock);
526 546
527 udelay(2); /* 2 us */ 547 udelay(2); /* 2 us */
528 548
529 /* Check for pending interrupts. */ 549 /* Check for pending interrupts. */
530 /* During init we issue marker directly */ 550 /* During init we issue marker directly */
531 if (!ha->marker_needed && !ha->flags.init_done) 551 if (!vha->marker_needed && !vha->flags.init_done)
532 qla2x00_poll(ha); 552 qla2x00_poll(rsp);
533
534 spin_lock_irq(&ha->hardware_lock); 553 spin_lock_irq(&ha->hardware_lock);
535 } 554 }
536 if (!pkt) { 555 if (!pkt) {
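qla2x00_req_pkt() above waits up to a second for ring space, dropping the hardware lock each iteration so the response queue can be polled and entries freed. A simplified runnable sketch of that bounded retry loop; free_cnt, poll_responses() and the timings are stand-ins, not driver symbols:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int free_cnt;			/* stand-in for req->cnt */

static void poll_responses(void)	/* stand-in for qla2x00_poll() */
{
	free_cnt++;	/* pretend a completion freed an entry */
}

static bool wait_for_slot(int req_cnt)
{
	for (int timer = 100; timer; timer--) {
		if (free_cnt > req_cnt + 2)	/* same headroom check */
			return true;
		poll_responses();	/* the driver drops the lock here */
		usleep(10000);		/* udelay(2) in the driver */
	}
	return false;			/* timed out; request fails */
}

int main(void)
{
	printf("slot: %s\n", wait_for_slot(1) ? "yes" : "no");
	return 0;
}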
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
547 * Note: The caller must hold the hardware lock before calling this routine. 566 * Note: The caller must hold the hardware lock before calling this routine.
548 */ 567 */
549static void 568static void
550qla2x00_isp_cmd(scsi_qla_host_t *ha) 569qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
551{ 570{
552 device_reg_t __iomem *reg = ha->iobase; 571 struct qla_hw_data *ha = vha->hw;
572 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
573 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
553 574
554 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 575 DEBUG5(printk("%s(): IOCB data:\n", __func__));
555 DEBUG5(qla2x00_dump_buffer( 576 DEBUG5(qla2x00_dump_buffer(
556 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE)); 577 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
557 578
558 /* Adjust ring index. */ 579 /* Adjust ring index. */
559 ha->req_ring_index++; 580 req->ring_index++;
560 if (ha->req_ring_index == ha->request_q_length) { 581 if (req->ring_index == req->length) {
561 ha->req_ring_index = 0; 582 req->ring_index = 0;
562 ha->request_ring_ptr = ha->request_ring; 583 req->ring_ptr = req->ring;
563 } else 584 } else
564 ha->request_ring_ptr++; 585 req->ring_ptr++;
565 586
566 /* Set chip new ring index. */ 587 /* Set chip new ring index. */
567 if (IS_FWI2_CAPABLE(ha)) { 588 if (ha->mqenable) {
568 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index); 589 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
569 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 590 RD_REG_DWORD(&ioreg->hccr);
570 } else { 591 }
571 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index); 592 else {
572 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); 593 if (IS_FWI2_CAPABLE(ha)) {
594 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
595 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
596 } else {
597 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
598 req->ring_index);
599 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
600 }
573 } 601 }
574 602
575} 603}
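After bumping the ring index, qla2x00_isp_cmd() writes the new producer index to the chip and immediately reads a register back so the posted PCI write is flushed; in the mqenable branch the read-back is the HCCR rather than the queue register itself. A sketch of the doorbell-plus-posting-read idiom, with an illustrative register block standing in for the mapped BAR:

#include <stdint.h>

struct regs_sketch {
	volatile uint32_t req_q_in;	/* producer-index doorbell */
	volatile uint32_t hccr;		/* any readable register */
};

static void ring_doorbell(struct regs_sketch *reg, uint16_t ring_index)
{
	reg->req_q_in = ring_index;	/* WRT_REG_DWORD(...) equivalent */
	(void)reg->hccr;		/* read-back flushes the posted write */
}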
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
610{ 638{
611 uint16_t avail_dsds; 639 uint16_t avail_dsds;
612 uint32_t *cur_dsd; 640 uint32_t *cur_dsd;
613 scsi_qla_host_t *ha; 641 scsi_qla_host_t *vha;
614 struct scsi_cmnd *cmd; 642 struct scsi_cmnd *cmd;
615 struct scatterlist *sg; 643 struct scatterlist *sg;
616 int i; 644 int i;
645 struct req_que *req;
617 646
618 cmd = sp->cmd; 647 cmd = sp->cmd;
619 648
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
627 return; 656 return;
628 } 657 }
629 658
630 ha = sp->ha; 659 vha = sp->vha;
660 req = sp->que;
631 661
632 /* Set transfer direction */ 662 /* Set transfer direction */
633 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 663 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
634 cmd_pkt->task_mgmt_flags = 664 cmd_pkt->task_mgmt_flags =
635 __constant_cpu_to_le16(TMF_WRITE_DATA); 665 __constant_cpu_to_le16(TMF_WRITE_DATA);
636 sp->fcport->ha->qla_stats.output_bytes += 666 sp->fcport->vha->hw->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd); 667 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 668 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
639 cmd_pkt->task_mgmt_flags = 669 cmd_pkt->task_mgmt_flags =
640 __constant_cpu_to_le16(TMF_READ_DATA); 670 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes += 671 sp->fcport->vha->hw->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd); 672 scsi_bufflen(sp->cmd);
643 } 673 }
644 674
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
658 * Five DSDs are available in the Continuation 688 * Five DSDs are available in the Continuation
659 * Type 1 IOCB. 689 * Type 1 IOCB.
660 */ 690 */
661 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 691 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
662 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 692 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
663 avail_dsds = 5; 693 avail_dsds = 5;
664 } 694 }
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
683{ 713{
684 int ret, nseg; 714 int ret, nseg;
685 unsigned long flags; 715 unsigned long flags;
686 scsi_qla_host_t *ha, *pha;
687 struct scsi_cmnd *cmd;
688 uint32_t *clr_ptr; 716 uint32_t *clr_ptr;
689 uint32_t index; 717 uint32_t index;
690 uint32_t handle; 718 uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
692 uint16_t cnt; 720 uint16_t cnt;
693 uint16_t req_cnt; 721 uint16_t req_cnt;
694 uint16_t tot_dsds; 722 uint16_t tot_dsds;
695 struct device_reg_24xx __iomem *reg; 723 struct req_que *req = NULL;
724 struct rsp_que *rsp = NULL;
725 struct scsi_cmnd *cmd = sp->cmd;
726 struct scsi_qla_host *vha = sp->vha;
727 struct qla_hw_data *ha = vha->hw;
728 uint16_t que_id;
696 729
697 /* Setup device pointers. */ 730 /* Setup device pointers. */
698 ret = 0; 731 ret = 0;
699 ha = sp->ha; 732 que_id = vha->req_ques[0];
700 pha = to_qla_parent(ha); 733
701 reg = &ha->iobase->isp24; 734 req = ha->req_q_map[que_id];
702 cmd = sp->cmd; 735 sp->que = req;
736
737 if (req->rsp)
738 rsp = req->rsp;
739 else
740 rsp = ha->rsp_q_map[que_id];
703 /* So we know we haven't pci_map'ed anything yet */ 741 /* So we know we haven't pci_map'ed anything yet */
704 tot_dsds = 0; 742 tot_dsds = 0;
705 743
706 /* Send marker if required */ 744 /* Send marker if required */
707 if (ha->marker_needed != 0) { 745 if (vha->marker_needed != 0) {
708 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 746 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
747 != QLA_SUCCESS)
709 return QLA_FUNCTION_FAILED; 748 return QLA_FUNCTION_FAILED;
710 } 749 vha->marker_needed = 0;
711 ha->marker_needed = 0;
712 } 750 }
713 751
714 /* Acquire ring specific lock */ 752 /* Acquire ring specific lock */
715 spin_lock_irqsave(&pha->hardware_lock, flags); 753 spin_lock_irqsave(&ha->hardware_lock, flags);
716 754
717 /* Check for room in outstanding command list. */ 755 /* Check for room in outstanding command list. */
718 handle = ha->current_outstanding_cmd; 756 handle = req->current_outstanding_cmd;
719 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 757 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
720 handle++; 758 handle++;
721 if (handle == MAX_OUTSTANDING_COMMANDS) 759 if (handle == MAX_OUTSTANDING_COMMANDS)
722 handle = 1; 760 handle = 1;
723 if (!ha->outstanding_cmds[handle]) 761 if (!req->outstanding_cmds[handle])
724 break; 762 break;
725 } 763 }
726 if (index == MAX_OUTSTANDING_COMMANDS) 764 if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
738 tot_dsds = nseg; 776 tot_dsds = nseg;
739 777
740 req_cnt = qla24xx_calc_iocbs(tot_dsds); 778 req_cnt = qla24xx_calc_iocbs(tot_dsds);
741 if (ha->req_q_cnt < (req_cnt + 2)) { 779 if (req->cnt < (req_cnt + 2)) {
742 cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out); 780 cnt = ha->isp_ops->rd_req_reg(ha, req->id);
743 if (ha->req_ring_index < cnt) 781
744 ha->req_q_cnt = cnt - ha->req_ring_index; 782 if (req->ring_index < cnt)
783 req->cnt = cnt - req->ring_index;
745 else 784 else
746 ha->req_q_cnt = ha->request_q_length - 785 req->cnt = req->length -
747 (ha->req_ring_index - cnt); 786 (req->ring_index - cnt);
748 } 787 }
749 if (ha->req_q_cnt < (req_cnt + 2)) 788 if (req->cnt < (req_cnt + 2))
750 goto queuing_error; 789 goto queuing_error;
751 790
752 /* Build command packet. */ 791 /* Build command packet. */
753 ha->current_outstanding_cmd = handle; 792 req->current_outstanding_cmd = handle;
754 ha->outstanding_cmds[handle] = sp; 793 req->outstanding_cmds[handle] = sp;
755 sp->ha = ha; 794 sp->vha = vha;
756 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 795 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
757 ha->req_q_cnt -= req_cnt; 796 req->cnt -= req_cnt;
758 797
759 cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr; 798 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
760 cmd_pkt->handle = handle; 799 cmd_pkt->handle = handle;
761 800
762 /* Zero out remaining portion of packet. */ 801 /* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
789 wmb(); 828 wmb();
790 829
791 /* Adjust ring index. */ 830 /* Adjust ring index. */
792 ha->req_ring_index++; 831 req->ring_index++;
793 if (ha->req_ring_index == ha->request_q_length) { 832 if (req->ring_index == req->length) {
794 ha->req_ring_index = 0; 833 req->ring_index = 0;
795 ha->request_ring_ptr = ha->request_ring; 834 req->ring_ptr = req->ring;
796 } else 835 } else
797 ha->request_ring_ptr++; 836 req->ring_ptr++;
798 837
799 sp->flags |= SRB_DMA_VALID; 838 sp->flags |= SRB_DMA_VALID;
800 839
801 /* Set chip new ring index. */ 840 /* Set chip new ring index. */
802 WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index); 841 ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
803 RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
804 842
805 /* Manage unprocessed RIO/ZIO commands in response queue. */ 843 /* Manage unprocessed RIO/ZIO commands in response queue. */
806 if (ha->flags.process_response_queue && 844 if (vha->flags.process_response_queue &&
807 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 845 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
808 qla24xx_process_response_queue(ha); 846 qla24xx_process_response_queue(rsp);
809 847
810 spin_unlock_irqrestore(&pha->hardware_lock, flags); 848 spin_unlock_irqrestore(&ha->hardware_lock, flags);
811 return QLA_SUCCESS; 849 return QLA_SUCCESS;
812 850
813queuing_error: 851queuing_error:
814 if (tot_dsds) 852 if (tot_dsds)
815 scsi_dma_unmap(cmd); 853 scsi_dma_unmap(cmd);
816 854
817 spin_unlock_irqrestore(&pha->hardware_lock, flags); 855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
818 856
819 return QLA_FUNCTION_FAILED; 857 return QLA_FUNCTION_FAILED;
820} 858}
859
860uint16_t
861qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
862{
863 device_reg_t __iomem *reg = (void *) ha->iobase;
864 return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
865}
866
867uint16_t
868qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
869{
870 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
871 return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
872}
873
874void
875qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
876{
877 device_reg_t __iomem *reg = (void *) ha->iobase;
878 WRT_REG_DWORD(&reg->isp24.req_q_in, index);
879 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
880}
881
882void
883qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
884{
885 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
886 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
887 WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
888 RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
889}
890
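The four helpers above hide which register file a queue's in/out pointers live in: on the 25xx multiqueue parts each queue's registers sit one page apart in the MQ BAR, so the queue id scales a fixed stride. A sketch of that address arithmetic; QUE_PAGE_SKETCH is an assumed stand-in for QLA_QUE_PAGE:

#include <stddef.h>
#include <stdint.h>

#define QUE_PAGE_SKETCH 0x1000	/* assumed per-queue register stride */

/* mq_base is the mapped MQ BAR; id selects the page holding that
 * queue's in/out registers. */
static inline volatile void *queue_regs(uint8_t *mq_base, uint16_t id)
{
	return (volatile void *)(mq_base + (size_t)QUE_PAGE_SKETCH * id);
}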
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..d5fb79a88001 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,13 @@
10#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
11 11
12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14static void qla2x00_status_entry(scsi_qla_host_t *, void *); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
17 20
18/** 21/**
19 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +30,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
27irqreturn_t 30irqreturn_t
28qla2100_intr_handler(int irq, void *dev_id) 31qla2100_intr_handler(int irq, void *dev_id)
29{ 32{
30 scsi_qla_host_t *ha; 33 scsi_qla_host_t *vha;
34 struct qla_hw_data *ha;
31 struct device_reg_2xxx __iomem *reg; 35 struct device_reg_2xxx __iomem *reg;
32 int status; 36 int status;
33 unsigned long iter; 37 unsigned long iter;
34 uint16_t hccr; 38 uint16_t hccr;
35 uint16_t mb[4]; 39 uint16_t mb[4];
40 struct rsp_que *rsp;
36 41
37 ha = (scsi_qla_host_t *) dev_id; 42 rsp = (struct rsp_que *) dev_id;
38 if (!ha) { 43 if (!rsp) {
39 printk(KERN_INFO 44 printk(KERN_INFO
40 "%s(): NULL host pointer\n", __func__); 45 "%s(): NULL response queue pointer\n", __func__);
41 return (IRQ_NONE); 46 return (IRQ_NONE);
42 } 47 }
43 48
49 ha = rsp->hw;
44 reg = &ha->iobase->isp; 50 reg = &ha->iobase->isp;
45 status = 0; 51 status = 0;
46 52
47 spin_lock(&ha->hardware_lock); 53 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp);
48 for (iter = 50; iter--; ) { 55 for (iter = 50; iter--; ) {
49 hccr = RD_REG_WORD(&reg->hccr); 56 hccr = RD_REG_WORD(&reg->hccr);
50 if (hccr & HCCR_RISC_PAUSE) { 57 if (hccr & HCCR_RISC_PAUSE) {
@@ -59,8 +66,8 @@ qla2100_intr_handler(int irq, void *dev_id)
59 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 66 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 RD_REG_WORD(&reg->hccr); 67 RD_REG_WORD(&reg->hccr);
61 68
62 ha->isp_ops->fw_dump(ha, 1); 69 ha->isp_ops->fw_dump(vha, 1);
63 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 70 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
64 break; 71 break;
65 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) 72 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 break; 73 break;
@@ -72,24 +79,24 @@ qla2100_intr_handler(int irq, void *dev_id)
72 /* Get mailbox data. */ 79 /* Get mailbox data. */
73 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 80 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
74 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 81 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 qla2x00_mbx_completion(ha, mb[0]); 82 qla2x00_mbx_completion(vha, mb[0]);
76 status |= MBX_INTERRUPT; 83 status |= MBX_INTERRUPT;
77 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 84 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 85 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 86 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 87 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 qla2x00_async_event(ha, mb); 88 qla2x00_async_event(vha, rsp, mb);
82 } else { 89 } else {
83 /*EMPTY*/ 90 /*EMPTY*/
84 DEBUG2(printk("scsi(%ld): Unrecognized " 91 DEBUG2(printk("scsi(%ld): Unrecognized "
85 "interrupt type (%d).\n", 92 "interrupt type (%d).\n",
86 ha->host_no, mb[0])); 93 vha->host_no, mb[0]));
87 } 94 }
88 /* Release mailbox registers. */ 95 /* Release mailbox registers. */
89 WRT_REG_WORD(&reg->semaphore, 0); 96 WRT_REG_WORD(&reg->semaphore, 0);
90 RD_REG_WORD(&reg->semaphore); 97 RD_REG_WORD(&reg->semaphore);
91 } else { 98 } else {
92 qla2x00_process_response_queue(ha); 99 qla2x00_process_response_queue(rsp);
93 100
94 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 101 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 RD_REG_WORD(&reg->hccr); 102 RD_REG_WORD(&reg->hccr);
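The handler rework swaps the interrupt cookie from the host to the response queue: dev_id now carries the struct rsp_que, from which the handler recovers the shared qla_hw_data and, via qla2x00_get_rsp_host(), the owning vha. A minimal sketch of that dev_id convention; the types and return values below are illustrative stand-ins:

struct hw_sketch { int shared_adapter_state; };

struct rsp_que_sketch {
	struct hw_sketch *hw;	/* back-pointer, like rsp->hw */
	int id;
};

/* dev_id is the cookie passed at request_irq() time; registering the
 * queue itself lets each vector find its own response ring directly. */
static int intr_handler_sketch(int irq, void *dev_id)
{
	struct rsp_que_sketch *rsp = dev_id;

	if (!rsp)
		return 0;	/* IRQ_NONE in the driver */
	(void)rsp->hw;	/* adapter state; owning vha looked up from rsp */
	(void)irq;
	return 1;		/* IRQ_HANDLED */
}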
@@ -118,25 +125,29 @@ qla2100_intr_handler(int irq, void *dev_id)
118irqreturn_t 125irqreturn_t
119qla2300_intr_handler(int irq, void *dev_id) 126qla2300_intr_handler(int irq, void *dev_id)
120{ 127{
121 scsi_qla_host_t *ha; 128 scsi_qla_host_t *vha;
122 struct device_reg_2xxx __iomem *reg; 129 struct device_reg_2xxx __iomem *reg;
123 int status; 130 int status;
124 unsigned long iter; 131 unsigned long iter;
125 uint32_t stat; 132 uint32_t stat;
126 uint16_t hccr; 133 uint16_t hccr;
127 uint16_t mb[4]; 134 uint16_t mb[4];
135 struct rsp_que *rsp;
136 struct qla_hw_data *ha;
128 137
129 ha = (scsi_qla_host_t *) dev_id; 138 rsp = (struct rsp_que *) dev_id;
130 if (!ha) { 139 if (!rsp) {
131 printk(KERN_INFO 140 printk(KERN_INFO
132 "%s(): NULL host pointer\n", __func__); 141 "%s(): NULL response queue pointer\n", __func__);
133 return (IRQ_NONE); 142 return (IRQ_NONE);
134 } 143 }
135 144
145 ha = rsp->hw;
136 reg = &ha->iobase->isp; 146 reg = &ha->iobase->isp;
137 status = 0; 147 status = 0;
138 148
139 spin_lock(&ha->hardware_lock); 149 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp);
140 for (iter = 50; iter--; ) { 151 for (iter = 50; iter--; ) {
141 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 if (stat & HSR_RISC_PAUSED) { 153 if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +170,8 @@ qla2300_intr_handler(int irq, void *dev_id)
159 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 RD_REG_WORD(&reg->hccr); 171 RD_REG_WORD(&reg->hccr);
161 172
162 ha->isp_ops->fw_dump(ha, 1); 173 ha->isp_ops->fw_dump(vha, 1);
163 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
164 break; 175 break;
165 } else if ((stat & HSR_RISC_INT) == 0) 176 } else if ((stat & HSR_RISC_INT) == 0)
166 break; 177 break;
@@ -170,7 +181,7 @@ qla2300_intr_handler(int irq, void *dev_id)
170 case 0x2: 181 case 0x2:
171 case 0x10: 182 case 0x10:
172 case 0x11: 183 case 0x11:
173 qla2x00_mbx_completion(ha, MSW(stat)); 184 qla2x00_mbx_completion(vha, MSW(stat));
174 status |= MBX_INTERRUPT; 185 status |= MBX_INTERRUPT;
175 186
176 /* Release mailbox registers. */ 187 /* Release mailbox registers. */
@@ -181,26 +192,26 @@ qla2300_intr_handler(int irq, void *dev_id)
181 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 qla2x00_async_event(ha, mb); 195 qla2x00_async_event(vha, rsp, mb);
185 break; 196 break;
186 case 0x13: 197 case 0x13:
187 qla2x00_process_response_queue(ha); 198 qla2x00_process_response_queue(rsp);
188 break; 199 break;
189 case 0x15: 200 case 0x15:
190 mb[0] = MBA_CMPLT_1_16BIT; 201 mb[0] = MBA_CMPLT_1_16BIT;
191 mb[1] = MSW(stat); 202 mb[1] = MSW(stat);
192 qla2x00_async_event(ha, mb); 203 qla2x00_async_event(vha, rsp, mb);
193 break; 204 break;
194 case 0x16: 205 case 0x16:
195 mb[0] = MBA_SCSI_COMPLETION; 206 mb[0] = MBA_SCSI_COMPLETION;
196 mb[1] = MSW(stat); 207 mb[1] = MSW(stat);
197 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 qla2x00_async_event(ha, mb); 209 qla2x00_async_event(vha, rsp, mb);
199 break; 210 break;
200 default: 211 default:
201 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 212 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 "(%d).\n", 213 "(%d).\n",
203 ha->host_no, stat & 0xff)); 214 vha->host_no, stat & 0xff));
204 break; 215 break;
205 } 216 }
206 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +234,11 @@ qla2300_intr_handler(int irq, void *dev_id)
223 * @mb0: Mailbox0 register 234 * @mb0: Mailbox0 register
224 */ 235 */
225static void 236static void
226qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 237qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
227{ 238{
228 uint16_t cnt; 239 uint16_t cnt;
229 uint16_t __iomem *wptr; 240 uint16_t __iomem *wptr;
241 struct qla_hw_data *ha = vha->hw;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 243
232 /* Load return mailbox registers. */ 244 /* Load return mailbox registers. */
@@ -247,10 +259,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
247 259
248 if (ha->mcp) { 260 if (ha->mcp) {
249 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 261 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 __func__, ha->host_no, ha->mcp->mb[0])); 262 __func__, vha->host_no, ha->mcp->mb[0]));
251 } else { 263 } else {
252 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 264 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 __func__, ha->host_no)); 265 __func__, vha->host_no));
254 } 266 }
255} 267}
256 268
@@ -260,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
260 * @mb: Mailbox registers (0 - 3) 272 * @mb: Mailbox registers (0 - 3)
261 */ 273 */
262void 274void
263qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
264{ 276{
265#define LS_UNKNOWN 2 277#define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +280,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
268 uint16_t handle_cnt; 280 uint16_t handle_cnt;
269 uint16_t cnt; 281 uint16_t cnt;
270 uint32_t handles[5]; 282 uint32_t handles[5];
283 struct qla_hw_data *ha = vha->hw;
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 284 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid; 285 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index; 286 uint8_t rscn_queue_index;
@@ -329,17 +342,19 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
329 342
330 switch (mb[0]) { 343 switch (mb[0]) {
331 case MBA_SCSI_COMPLETION: /* Fast Post */ 344 case MBA_SCSI_COMPLETION: /* Fast Post */
332 if (!ha->flags.online) 345 if (!vha->flags.online)
333 break; 346 break;
334 347
335 for (cnt = 0; cnt < handle_cnt; cnt++) 348 for (cnt = 0; cnt < handle_cnt; cnt++)
336 qla2x00_process_completed_request(ha, handles[cnt]); 349 qla2x00_process_completed_request(vha, rsp->req,
350 handles[cnt]);
337 break; 351 break;
338 352
339 case MBA_RESET: /* Reset */ 353 case MBA_RESET: /* Reset */
340 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); 354 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355 vha->host_no));
341 356
342 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 357 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
343 break; 358 break;
344 359
345 case MBA_SYSTEM_ERR: /* System Error */ 360 case MBA_SYSTEM_ERR: /* System Error */
@@ -347,70 +362,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
347 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 mb[1], mb[2], mb[3]); 363 mb[1], mb[2], mb[3]);
349 364
350 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
351 ha->isp_ops->fw_dump(ha, 1); 366 ha->isp_ops->fw_dump(vha, 1);
352 367
353 if (IS_FWI2_CAPABLE(ha)) { 368 if (IS_FWI2_CAPABLE(ha)) {
354 if (mb[1] == 0 && mb[2] == 0) { 369 if (mb[1] == 0 && mb[2] == 0) {
355 qla_printk(KERN_ERR, ha, 370 qla_printk(KERN_ERR, ha,
356 "Unrecoverable Hardware Error: adapter " 371 "Unrecoverable Hardware Error: adapter "
357 "marked OFFLINE!\n"); 372 "marked OFFLINE!\n");
358 ha->flags.online = 0; 373 vha->flags.online = 0;
359 } else 374 } else
360 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 375 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
361 } else if (mb[1] == 0) { 376 } else if (mb[1] == 0) {
362 qla_printk(KERN_INFO, ha, 377 qla_printk(KERN_INFO, ha,
363 "Unrecoverable Hardware Error: adapter marked " 378 "Unrecoverable Hardware Error: adapter marked "
364 "OFFLINE!\n"); 379 "OFFLINE!\n");
365 ha->flags.online = 0; 380 vha->flags.online = 0;
366 } else 381 } else
367 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 382 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break; 383 break;
369 384
370 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 385 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
371 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 386 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 ha->host_no)); 387 vha->host_no));
373 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
374 389
375 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
376 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
377 break; 392 break;
378 393
379 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 394 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
380 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 395 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 ha->host_no)); 396 vha->host_no));
382 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
383 398
384 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
385 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
386 break; 401 break;
387 402
388 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 403 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
389 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 404 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 ha->host_no)); 405 vha->host_no));
391 break; 406 break;
392 407
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 408 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, 409 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
395 mb[1])); 410 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); 411 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 412
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 413 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 414 atomic_set(&vha->loop_state, LOOP_DOWN);
400 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 415 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
401 qla2x00_mark_all_devices_lost(ha, 1); 416 qla2x00_mark_all_devices_lost(vha, 1);
402 } 417 }
403 418
404 if (ha->parent) { 419 if (vha->vp_idx) {
405 atomic_set(&ha->vp_state, VP_FAILED); 420 atomic_set(&vha->vp_state, VP_FAILED);
406 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 421 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
407 } 422 }
408 423
409 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 424 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
410 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 425 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
411 426
412 ha->flags.management_server_logged_in = 0; 427 vha->flags.management_server_logged_in = 0;
413 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 428 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
414 break; 429 break;
415 430
416 case MBA_LOOP_UP: /* Loop Up Event */ 431 case MBA_LOOP_UP: /* Loop Up Event */
@@ -425,59 +440,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
425 } 440 }
426 441
427 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 442 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 ha->host_no, link_speed)); 443 vha->host_no, link_speed));
429 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", 444 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 link_speed); 445 link_speed);
431 446
432 ha->flags.management_server_logged_in = 0; 447 vha->flags.management_server_logged_in = 0;
433 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); 448 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
434 break; 449 break;
435 450
436 case MBA_LOOP_DOWN: /* Loop Down Event */ 451 case MBA_LOOP_DOWN: /* Loop Down Event */
437 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 452 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); 453 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
439 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 454 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 mb[1], mb[2], mb[3]); 455 mb[1], mb[2], mb[3]);
441 456
442 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
443 atomic_set(&ha->loop_state, LOOP_DOWN); 458 atomic_set(&vha->loop_state, LOOP_DOWN);
444 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 459 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
445 ha->device_flags |= DFLG_NO_CABLE; 460 vha->device_flags |= DFLG_NO_CABLE;
446 qla2x00_mark_all_devices_lost(ha, 1); 461 qla2x00_mark_all_devices_lost(vha, 1);
447 } 462 }
448 463
449 if (ha->parent) { 464 if (vha->vp_idx) {
450 atomic_set(&ha->vp_state, VP_FAILED); 465 atomic_set(&vha->vp_state, VP_FAILED);
451 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 466 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
452 } 467 }
453 468
454 ha->flags.management_server_logged_in = 0; 469 vha->flags.management_server_logged_in = 0;
455 ha->link_data_rate = PORT_SPEED_UNKNOWN; 470 ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 471 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
457 break; 472 break;
458 473
459 case MBA_LIP_RESET: /* LIP reset occurred */ 474 case MBA_LIP_RESET: /* LIP reset occurred */
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 475 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 476 vha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 477 qla_printk(KERN_INFO, ha,
463 "LIP reset occurred (%x).\n", mb[1]); 478 "LIP reset occurred (%x).\n", mb[1]);
464 479
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 480 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 481 atomic_set(&vha->loop_state, LOOP_DOWN);
467 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 482 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
468 qla2x00_mark_all_devices_lost(ha, 1); 483 qla2x00_mark_all_devices_lost(vha, 1);
469 } 484 }
470 485
471 if (ha->parent) { 486 if (vha->vp_idx) {
472 atomic_set(&ha->vp_state, VP_FAILED); 487 atomic_set(&vha->vp_state, VP_FAILED);
473 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 488 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
474 } 489 }
475 490
476 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 491 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
477 492
478 ha->operating_mode = LOOP; 493 ha->operating_mode = LOOP;
479 ha->flags.management_server_logged_in = 0; 494 vha->flags.management_server_logged_in = 0;
480 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); 495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
481 break; 496 break;
482 497
483 case MBA_POINT_TO_POINT: /* Point-to-Point */ 498 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -485,33 +500,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
485 break; 500 break;
486 501
487 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 ha->host_no)); 503 vha->host_no));
489 504
490 /* 505 /*
491 * Until there's a transition from loop down to loop up, treat 506 * Until there's a transition from loop down to loop up, treat
492 * this as loop down only. 507 * this as loop down only.
493 */ 508 */
494 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 509 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
495 atomic_set(&ha->loop_state, LOOP_DOWN); 510 atomic_set(&vha->loop_state, LOOP_DOWN);
496 if (!atomic_read(&ha->loop_down_timer)) 511 if (!atomic_read(&vha->loop_down_timer))
497 atomic_set(&ha->loop_down_timer, 512 atomic_set(&vha->loop_down_timer,
498 LOOP_DOWN_TIME); 513 LOOP_DOWN_TIME);
499 qla2x00_mark_all_devices_lost(ha, 1); 514 qla2x00_mark_all_devices_lost(vha, 1);
500 } 515 }
501 516
502 if (ha->parent) { 517 if (vha->vp_idx) {
503 atomic_set(&ha->vp_state, VP_FAILED); 518 atomic_set(&vha->vp_state, VP_FAILED);
504 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 519 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
505 } 520 }
506 521
507 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 522 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
508 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 523 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
509 } 524
510 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 525 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
511 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 526 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
512 527
513 ha->flags.gpsc_supported = 1; 528 ha->flags.gpsc_supported = 1;
514 ha->flags.management_server_logged_in = 0; 529 vha->flags.management_server_logged_in = 0;
515 break; 530 break;
516 531
517 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 532 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
@@ -520,134 +535,137 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
520 535
521 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 536 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 "received.\n", 537 "received.\n",
523 ha->host_no)); 538 vha->host_no));
524 qla_printk(KERN_INFO, ha, 539 qla_printk(KERN_INFO, ha,
525 "Configuration change detected: value=%x.\n", mb[1]); 540 "Configuration change detected: value=%x.\n", mb[1]);
526 541
527 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 542 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
528 atomic_set(&ha->loop_state, LOOP_DOWN); 543 atomic_set(&vha->loop_state, LOOP_DOWN);
529 if (!atomic_read(&ha->loop_down_timer)) 544 if (!atomic_read(&vha->loop_down_timer))
530 atomic_set(&ha->loop_down_timer, 545 atomic_set(&vha->loop_down_timer,
531 LOOP_DOWN_TIME); 546 LOOP_DOWN_TIME);
532 qla2x00_mark_all_devices_lost(ha, 1); 547 qla2x00_mark_all_devices_lost(vha, 1);
533 } 548 }
534 549
535 if (ha->parent) { 550 if (vha->vp_idx) {
536 atomic_set(&ha->vp_state, VP_FAILED); 551 atomic_set(&vha->vp_state, VP_FAILED);
537 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 552 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
538 } 553 }
539 554
540 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 555 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
541 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 556 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
542 break; 557 break;
543 558
544 case MBA_PORT_UPDATE: /* Port database update */ 559 case MBA_PORT_UPDATE: /* Port database update */
560 /* Only handle SCNs for our Vport index. */
561 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562 break;
563
545 /* 564 /*
546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 565 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 566 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and wait for RSCN to come in. 567 * it. Otherwise ignore it and wait for RSCN to come in.
549 */ 568 */
550 atomic_set(&ha->loop_down_timer, 0); 569 atomic_set(&vha->loop_down_timer, 0);
551 if (atomic_read(&ha->loop_state) != LOOP_DOWN && 570 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
552 atomic_read(&ha->loop_state) != LOOP_DEAD) { 571 atomic_read(&vha->loop_state) != LOOP_DEAD) {
553 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 572 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], 573 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
555 mb[2], mb[3])); 574 mb[2], mb[3]));
556 break; 575 break;
557 } 576 }
558 577
559 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 578 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 ha->host_no)); 579 vha->host_no));
561 DEBUG(printk(KERN_INFO 580 DEBUG(printk(KERN_INFO
562 "scsi(%ld): Port database changed %04x %04x %04x.\n", 581 "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 ha->host_no, mb[1], mb[2], mb[3])); 582 vha->host_no, mb[1], mb[2], mb[3]));
564 583
565 /* 584 /*
566 * Mark all devices as missing so we will log in again. 585 * Mark all devices as missing so we will log in again.
567 */ 586 */
568 atomic_set(&ha->loop_state, LOOP_UP); 587 atomic_set(&vha->loop_state, LOOP_UP);
569 588
570 qla2x00_mark_all_devices_lost(ha, 1); 589 qla2x00_mark_all_devices_lost(vha, 1);
571 590
572 ha->flags.rscn_queue_overflow = 1; 591 vha->flags.rscn_queue_overflow = 1;
573 592
574 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 593 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
575 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 594 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
576 break; 595 break;
577 596
578 case MBA_RSCN_UPDATE: /* State Change Registration */ 597 case MBA_RSCN_UPDATE: /* State Change Registration */
579 /* Check if the Vport has issued a SCR */ 598 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) 599 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
581 break; 600 break;
582 /* Only handle SCNs for our Vport index. */ 601 /* Only handle SCNs for our Vport index. */
583 if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) 602 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
584 break; 603 break;
585
586 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n", 604 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
587 ha->host_no)); 605 vha->host_no));
588 DEBUG(printk(KERN_INFO 606 DEBUG(printk(KERN_INFO
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 607 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 608 vha->host_no, mb[1], mb[2], mb[3]));
591 609
592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 610 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 611 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
594 ha->d_id.b.al_pa; 612 | vha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 613 if (rscn_entry == host_pid) {
596 DEBUG(printk(KERN_INFO 614 DEBUG(printk(KERN_INFO
597 "scsi(%ld): Ignoring RSCN update to local host " 615 "scsi(%ld): Ignoring RSCN update to local host "
598 "port ID (%06x)\n", 616 "port ID (%06x)\n",
599 ha->host_no, host_pid)); 617 vha->host_no, host_pid));
600 break; 618 break;
601 } 619 }
602 620
603 /* Ignore reserved bits from RSCN-payload. */ 621 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 622 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
605 rscn_queue_index = ha->rscn_in_ptr + 1; 623 rscn_queue_index = vha->rscn_in_ptr + 1;
606 if (rscn_queue_index == MAX_RSCN_COUNT) 624 if (rscn_queue_index == MAX_RSCN_COUNT)
607 rscn_queue_index = 0; 625 rscn_queue_index = 0;
608 if (rscn_queue_index != ha->rscn_out_ptr) { 626 if (rscn_queue_index != vha->rscn_out_ptr) {
609 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; 627 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
610 ha->rscn_in_ptr = rscn_queue_index; 628 vha->rscn_in_ptr = rscn_queue_index;
611 } else { 629 } else {
612 ha->flags.rscn_queue_overflow = 1; 630 vha->flags.rscn_queue_overflow = 1;
613 } 631 }
614 632
615 atomic_set(&ha->loop_state, LOOP_UPDATE); 633 atomic_set(&vha->loop_state, LOOP_UPDATE);
616 atomic_set(&ha->loop_down_timer, 0); 634 atomic_set(&vha->loop_down_timer, 0);
617 ha->flags.management_server_logged_in = 0; 635 vha->flags.management_server_logged_in = 0;
618 636
619 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 637 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
620 set_bit(RSCN_UPDATE, &ha->dpc_flags); 638 set_bit(RSCN_UPDATE, &vha->dpc_flags);
621 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); 639 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
622 break; 640 break;
623 641
624 /* case MBA_RIO_RESPONSE: */ 642 /* case MBA_RIO_RESPONSE: */
625 case MBA_ZIO_RESPONSE: 643 case MBA_ZIO_RESPONSE:
626 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
627 ha->host_no)); 645 vha->host_no));
628 DEBUG(printk(KERN_INFO 646 DEBUG(printk(KERN_INFO
629 "scsi(%ld): [R|Z]IO update completion.\n", 647 "scsi(%ld): [R|Z]IO update completion.\n",
630 ha->host_no)); 648 vha->host_no));
631 649
632 if (IS_FWI2_CAPABLE(ha)) 650 if (IS_FWI2_CAPABLE(ha))
633 qla24xx_process_response_queue(ha); 651 qla24xx_process_response_queue(rsp);
634 else 652 else
635 qla2x00_process_response_queue(ha); 653 qla2x00_process_response_queue(rsp);
636 break; 654 break;
637 655
638 case MBA_DISCARD_RND_FRAME: 656 case MBA_DISCARD_RND_FRAME:
639 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 657 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
640 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 658 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
641 break; 659 break;
642 660
643 case MBA_TRACE_NOTIFICATION: 661 case MBA_TRACE_NOTIFICATION:
644 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 662 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
645 ha->host_no, mb[1], mb[2])); 663 vha->host_no, mb[1], mb[2]));
646 break; 664 break;
647 665
648 case MBA_ISP84XX_ALERT: 666 case MBA_ISP84XX_ALERT:
649 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 667 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
650 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); 668 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
651 669
652 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 670 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
653 switch (mb[1]) { 671 switch (mb[1]) {
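The MBA_RSCN_UPDATE case earlier in this hunk inserts entries into the vha's RSCN queue as a one-slot-empty circular buffer, setting rscn_queue_overflow when full so a later full rescan is forced. A standalone sketch of that insert, with assumed names and size:

#include <stdbool.h>
#include <stdint.h>

#define MAX_RSCN_SKETCH 32	/* stand-in for MAX_RSCN_COUNT */

struct rscn_q {
	uint32_t entries[MAX_RSCN_SKETCH];
	uint8_t in_ptr, out_ptr;
	bool overflow;
};

/* If advancing in_ptr would collide with out_ptr, the entry is dropped
 * and overflow is flagged instead of overwriting unconsumed RSCNs. */
static void rscn_push(struct rscn_q *q, uint32_t entry)
{
	uint8_t next = q->in_ptr + 1;

	if (next == MAX_RSCN_SKETCH)
		next = 0;
	if (next != q->out_ptr) {
		q->entries[q->in_ptr] = entry;
		q->in_ptr = next;
	} else {
		q->overflow = true;
	}
}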
@@ -682,16 +700,22 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
682 break; 700 break;
683 } 701 }
684 702
685 if (!ha->parent && ha->num_vhosts) 703 if (!vha->vp_idx && ha->num_vhosts)
686 qla2x00_alert_all_vps(ha, mb); 704 qla2x00_alert_all_vps(rsp, mb);
687} 705}
688 706
689static void 707static void
690qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) 708qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
691{ 709{
692 fc_port_t *fcport = data; 710 fc_port_t *fcport = data;
711 struct scsi_qla_host *vha = fcport->vha;
712 struct qla_hw_data *ha = vha->hw;
713 struct req_que *req = NULL;
693 714
694 if (fcport->ha->max_q_depth <= sdev->queue_depth) 715 req = ha->req_q_map[vha->req_ques[0]];
716 if (!req)
717 return;
718 if (req->max_q_depth <= sdev->queue_depth)
695 return; 719 return;
696 720
697 if (sdev->ordered_tags) 721 if (sdev->ordered_tags)
@@ -703,9 +727,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
703 727
704 fcport->last_ramp_up = jiffies; 728 fcport->last_ramp_up = jiffies;
705 729
706 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 730 DEBUG2(qla_printk(KERN_INFO, ha,
707 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 731 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
708 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 732 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
709 sdev->queue_depth)); 733 sdev->queue_depth));
710} 734}
711 735
@@ -717,20 +741,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
717 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) 741 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
718 return; 742 return;
719 743
720 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 744 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
721 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 745 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
722 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 746 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
723 sdev->queue_depth)); 747 sdev->queue_depth));
724} 748}
725 749
726static inline void 750static inline void
727qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) 751qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752 srb_t *sp)
728{ 753{
729 fc_port_t *fcport; 754 fc_port_t *fcport;
730 struct scsi_device *sdev; 755 struct scsi_device *sdev;
731 756
732 sdev = sp->cmd->device; 757 sdev = sp->cmd->device;
733 if (sdev->queue_depth >= ha->max_q_depth) 758 if (sdev->queue_depth >= req->max_q_depth)
734 return; 759 return;
735 760
736 fcport = sp->fcport; 761 fcport = sp->fcport;
@@ -751,25 +776,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
751 * @index: SRB index 776 * @index: SRB index
752 */ 777 */
753static void 778static void
754qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) 779qla2x00_process_completed_request(struct scsi_qla_host *vha,
780 struct req_que *req, uint32_t index)
755{ 781{
756 srb_t *sp; 782 srb_t *sp;
783 struct qla_hw_data *ha = vha->hw;
757 784
758 /* Validate handle. */ 785 /* Validate handle. */
759 if (index >= MAX_OUTSTANDING_COMMANDS) { 786 if (index >= MAX_OUTSTANDING_COMMANDS) {
760 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 787 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
761 ha->host_no, index)); 788 vha->host_no, index));
762 qla_printk(KERN_WARNING, ha, 789 qla_printk(KERN_WARNING, ha,
763 "Invalid SCSI completion handle %d.\n", index); 790 "Invalid SCSI completion handle %d.\n", index);
764 791
765 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
766 return; 793 return;
767 } 794 }
768 795
769 sp = ha->outstanding_cmds[index]; 796 sp = req->outstanding_cmds[index];
770 if (sp) { 797 if (sp) {
771 /* Free outstanding command slot. */ 798 /* Free outstanding command slot. */
772 ha->outstanding_cmds[index] = NULL; 799 req->outstanding_cmds[index] = NULL;
773 800
774 CMD_COMPL_STATUS(sp->cmd) = 0L; 801 CMD_COMPL_STATUS(sp->cmd) = 0L;
775 CMD_SCSI_STATUS(sp->cmd) = 0L; 802 CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +804,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
777 /* Save ISP completion status */ 804 /* Save ISP completion status */
778 sp->cmd->result = DID_OK << 16; 805 sp->cmd->result = DID_OK << 16;
779 806
780 qla2x00_ramp_up_queue_depth(ha, sp); 807 qla2x00_ramp_up_queue_depth(vha, req, sp);
781 qla2x00_sp_compl(ha, sp); 808 qla2x00_sp_compl(ha, sp);
782 } else { 809 } else {
783 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 810 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
784 ha->host_no)); 811 vha->host_no));
785 qla_printk(KERN_WARNING, ha, 812 qla_printk(KERN_WARNING, ha,
786 "Invalid ISP SCSI completion handle\n"); 813 "Invalid ISP SCSI completion handle\n");
787 814
788 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 815 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789 } 816 }
790} 817}
791 818
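
From this point on the outstanding_cmds[] table lives in struct req_que rather than in the adapter, so a completion handle is only meaningful relative to the queue the command was issued on. A standalone sketch of the claim-and-free pattern used by qla2x00_process_completed_request() above and by qla2x00_error_entry() later in this diff (types reduced to what the pattern touches):

	#include <stddef.h>

	#define MAX_OUTSTANDING_COMMANDS 1024	/* real value lives in qla_def.h */

	struct srb;				/* opaque request block */

	struct req_que_model {
		struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	};

	/* A NULL return is the "invalid handle" case that makes the driver
	 * set ISP_ABORT_NEEDED. */
	static struct srb *claim_handle(struct req_que_model *req, unsigned int index)
	{
		struct srb *sp;

		if (index >= MAX_OUTSTANDING_COMMANDS)
			return NULL;
		sp = req->outstanding_cmds[index];
		req->outstanding_cmds[index] = NULL;	/* free the slot */
		return sp;
	}
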
@@ -794,32 +821,36 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
794 * @ha: SCSI driver HA context 821 * @ha: SCSI driver HA context
795 */ 822 */
796void 823void
797qla2x00_process_response_queue(struct scsi_qla_host *ha) 824qla2x00_process_response_queue(struct rsp_que *rsp)
798{ 825{
826 struct scsi_qla_host *vha;
827 struct qla_hw_data *ha = rsp->hw;
799 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 828 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
800 sts_entry_t *pkt; 829 sts_entry_t *pkt;
801 uint16_t handle_cnt; 830 uint16_t handle_cnt;
802 uint16_t cnt; 831 uint16_t cnt;
803 832
804 if (!ha->flags.online) 833 vha = qla2x00_get_rsp_host(rsp);
834
835 if (!vha->flags.online)
805 return; 836 return;
806 837
807 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 838 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
808 pkt = (sts_entry_t *)ha->response_ring_ptr; 839 pkt = (sts_entry_t *)rsp->ring_ptr;
809 840
810 ha->rsp_ring_index++; 841 rsp->ring_index++;
811 if (ha->rsp_ring_index == ha->response_q_length) { 842 if (rsp->ring_index == rsp->length) {
812 ha->rsp_ring_index = 0; 843 rsp->ring_index = 0;
813 ha->response_ring_ptr = ha->response_ring; 844 rsp->ring_ptr = rsp->ring;
814 } else { 845 } else {
815 ha->response_ring_ptr++; 846 rsp->ring_ptr++;
816 } 847 }
817 848
818 if (pkt->entry_status != 0) { 849 if (pkt->entry_status != 0) {
819 DEBUG3(printk(KERN_INFO 850 DEBUG3(printk(KERN_INFO
820 "scsi(%ld): Process error entry.\n", ha->host_no)); 851 "scsi(%ld): Process error entry.\n", vha->host_no));
821 852
822 qla2x00_error_entry(ha, pkt); 853 qla2x00_error_entry(vha, rsp, pkt);
823 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 854 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
824 wmb(); 855 wmb();
825 continue; 856 continue;
@@ -827,31 +858,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
827 858
828 switch (pkt->entry_type) { 859 switch (pkt->entry_type) {
829 case STATUS_TYPE: 860 case STATUS_TYPE:
830 qla2x00_status_entry(ha, pkt); 861 qla2x00_status_entry(vha, rsp, pkt);
831 break; 862 break;
832 case STATUS_TYPE_21: 863 case STATUS_TYPE_21:
833 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 864 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
834 for (cnt = 0; cnt < handle_cnt; cnt++) { 865 for (cnt = 0; cnt < handle_cnt; cnt++) {
835 qla2x00_process_completed_request(ha, 866 qla2x00_process_completed_request(vha, rsp->req,
836 ((sts21_entry_t *)pkt)->handle[cnt]); 867 ((sts21_entry_t *)pkt)->handle[cnt]);
837 } 868 }
838 break; 869 break;
839 case STATUS_TYPE_22: 870 case STATUS_TYPE_22:
840 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 871 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
841 for (cnt = 0; cnt < handle_cnt; cnt++) { 872 for (cnt = 0; cnt < handle_cnt; cnt++) {
842 qla2x00_process_completed_request(ha, 873 qla2x00_process_completed_request(vha, rsp->req,
843 ((sts22_entry_t *)pkt)->handle[cnt]); 874 ((sts22_entry_t *)pkt)->handle[cnt]);
844 } 875 }
845 break; 876 break;
846 case STATUS_CONT_TYPE: 877 case STATUS_CONT_TYPE:
847 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 878 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
848 break; 879 break;
849 default: 880 default:
850 /* Type Not Supported. */ 881 /* Type Not Supported. */
851 DEBUG4(printk(KERN_WARNING 882 DEBUG4(printk(KERN_WARNING
852 "scsi(%ld): Received unknown response pkt type %x " 883 "scsi(%ld): Received unknown response pkt type %x "
853 "entry status=%x.\n", 884 "entry status=%x.\n",
854 ha->host_no, pkt->entry_type, pkt->entry_status)); 885 vha->host_no, pkt->entry_type, pkt->entry_status));
855 break; 886 break;
856 } 887 }
857 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 888 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +890,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
859 } 890 }
860 891
861 /* Adjust ring index */ 892 /* Adjust ring index */
862 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); 893 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
863} 894}
864 895
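
The response-ring walk itself is untouched; what changed is where its state lives. Ring base, consumer pointer, index and length all move from the HA into struct rsp_que so several queues can advance independently. A compileable model of the wrap logic, with field names taken from the diff and RESPONSE_PROCESSED standing in for the real sentinel in qla_def.h:

	struct response { unsigned int signature; };

	struct rsp_que_model {
		struct response *ring;		/* ring base */
		struct response *ring_ptr;	/* next entry to consume */
		unsigned short ring_index;
		unsigned short length;		/* entries in the ring */
	};

	static void rsp_advance(struct rsp_que_model *rsp)
	{
		if (++rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;	/* wrap to base */
		} else {
			rsp->ring_ptr++;
		}
	}
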
865static inline void 896static inline void
@@ -881,10 +912,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
881 sp->request_sense_ptr += sense_len; 912 sp->request_sense_ptr += sense_len;
882 sp->request_sense_length -= sense_len; 913 sp->request_sense_length -= sense_len;
883 if (sp->request_sense_length != 0) 914 if (sp->request_sense_length != 0)
884 sp->fcport->ha->status_srb = sp; 915 sp->fcport->vha->status_srb = sp;
885 916
886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 917 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, 918 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
888 cp->device->channel, cp->device->id, cp->device->lun, cp, 919 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number)); 920 cp->serial_number));
890 if (sense_len) 921 if (sense_len)
@@ -898,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 * @pkt: Entry pointer 929 * @pkt: Entry pointer
899 */ 930 */
900static void 931static void
901qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) 932qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
902{ 933{
903 srb_t *sp; 934 srb_t *sp;
904 fc_port_t *fcport; 935 fc_port_t *fcport;
@@ -911,6 +942,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
911 int32_t resid; 942 int32_t resid;
912 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 943 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
913 uint8_t *rsp_info, *sense_data; 944 uint8_t *rsp_info, *sense_data;
945 struct qla_hw_data *ha = vha->hw;
946 struct req_que *req = rsp->req;
914 947
915 sts = (sts_entry_t *) pkt; 948 sts = (sts_entry_t *) pkt;
916 sts24 = (struct sts_entry_24xx *) pkt; 949 sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +957,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
924 957
925 /* Fast path completion. */ 958 /* Fast path completion. */
926 if (comp_status == CS_COMPLETE && scsi_status == 0) { 959 if (comp_status == CS_COMPLETE && scsi_status == 0) {
927 qla2x00_process_completed_request(ha, sts->handle); 960 qla2x00_process_completed_request(vha, req, sts->handle);
928 961
929 return; 962 return;
930 } 963 }
931 964
932 /* Validate handle. */ 965 /* Validate handle. */
933 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 966 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
934 sp = ha->outstanding_cmds[sts->handle]; 967 sp = req->outstanding_cmds[sts->handle];
935 ha->outstanding_cmds[sts->handle] = NULL; 968 req->outstanding_cmds[sts->handle] = NULL;
936 } else 969 } else
937 sp = NULL; 970 sp = NULL;
938 971
939 if (sp == NULL) { 972 if (sp == NULL) {
940 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 973 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
941 ha->host_no)); 974 vha->host_no));
942 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 975 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
943 976
944 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 977 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
945 qla2xxx_wake_dpc(ha); 978 qla2xxx_wake_dpc(vha);
946 return; 979 return;
947 } 980 }
948 cp = sp->cmd; 981 cp = sp->cmd;
949 if (cp == NULL) { 982 if (cp == NULL) {
950 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 983 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
951 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp)); 984 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
952 qla_printk(KERN_WARNING, ha, 985 qla_printk(KERN_WARNING, ha,
953 "Command is NULL: already returned to OS (sp=%p)\n", sp); 986 "Command is NULL: already returned to OS (sp=%p)\n", sp);
954 987
@@ -987,7 +1020,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
987 if (rsp_info_len > 3 && rsp_info[3]) { 1020 if (rsp_info_len > 3 && rsp_info[3]) {
988 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1021 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
989 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 1022 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
990 "retrying command\n", ha->host_no, 1023 "retrying command\n", vha->host_no,
991 cp->device->channel, cp->device->id, 1024 cp->device->channel, cp->device->id,
992 cp->device->lun, rsp_info_len, rsp_info[0], 1025 cp->device->lun, rsp_info_len, rsp_info[0],
993 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], 1026 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
@@ -1025,7 +1058,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1025 qla_printk(KERN_INFO, ha, 1058 qla_printk(KERN_INFO, ha,
1026 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1059 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1027 "detected (%x of %x bytes)...returning " 1060 "detected (%x of %x bytes)...returning "
1028 "error status.\n", ha->host_no, 1061 "error status.\n", vha->host_no,
1029 cp->device->channel, cp->device->id, 1062 cp->device->channel, cp->device->id,
1030 cp->device->lun, resid, 1063 cp->device->lun, resid,
1031 scsi_bufflen(cp)); 1064 scsi_bufflen(cp));
@@ -1039,7 +1072,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1039 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1072 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1040 DEBUG2(printk(KERN_INFO 1073 DEBUG2(printk(KERN_INFO
1041 "scsi(%ld): QUEUE FULL status detected " 1074 "scsi(%ld): QUEUE FULL status detected "
1042 "0x%x-0x%x.\n", ha->host_no, comp_status, 1075 "0x%x-0x%x.\n", vha->host_no, comp_status,
1043 scsi_status)); 1076 scsi_status));
1044 1077
1045 /* Adjust queue depth for all luns on the port. */ 1078 /* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1111,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1078 DEBUG2(printk(KERN_INFO 1111 DEBUG2(printk(KERN_INFO
1079 "scsi(%ld:%d:%d) UNDERRUN status detected " 1112 "scsi(%ld:%d:%d) UNDERRUN status detected "
1080 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1113 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1081 "os_underflow=0x%x\n", ha->host_no, 1114 "os_underflow=0x%x\n", vha->host_no,
1082 cp->device->id, cp->device->lun, comp_status, 1115 cp->device->id, cp->device->lun, comp_status,
1083 scsi_status, resid_len, resid, cp->cmnd[0], 1116 scsi_status, resid_len, resid, cp->cmnd[0],
1084 cp->underflow)); 1117 cp->underflow));
@@ -1095,7 +1128,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1095 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1128 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1096 DEBUG2(printk(KERN_INFO 1129 DEBUG2(printk(KERN_INFO
1097 "scsi(%ld): QUEUE FULL status detected " 1130 "scsi(%ld): QUEUE FULL status detected "
1098 "0x%x-0x%x.\n", ha->host_no, comp_status, 1131 "0x%x-0x%x.\n", vha->host_no, comp_status,
1099 scsi_status)); 1132 scsi_status));
1100 1133
1101 /* 1134 /*
@@ -1125,10 +1158,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1125 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1158 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1126 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1159 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1127 "frame(s) detected (%x of %x bytes)..." 1160 "frame(s) detected (%x of %x bytes)..."
1128 "retrying command.\n", ha->host_no, 1161 "retrying command.\n",
1129 cp->device->channel, cp->device->id, 1162 vha->host_no, cp->device->channel,
1130 cp->device->lun, resid, 1163 cp->device->id, cp->device->lun, resid,
1131 scsi_bufflen(cp))); 1164 scsi_bufflen(cp)));
1132 1165
1133 cp->result = DID_BUS_BUSY << 16; 1166 cp->result = DID_BUS_BUSY << 16;
1134 break; 1167 break;
@@ -1140,7 +1173,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1140 qla_printk(KERN_INFO, ha, 1173 qla_printk(KERN_INFO, ha,
1141 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1174 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1142 "detected (%x of %x bytes)...returning " 1175 "detected (%x of %x bytes)...returning "
1143 "error status.\n", ha->host_no, 1176 "error status.\n", vha->host_no,
1144 cp->device->channel, cp->device->id, 1177 cp->device->channel, cp->device->id,
1145 cp->device->lun, resid, 1178 cp->device->lun, resid,
1146 scsi_bufflen(cp)); 1179 scsi_bufflen(cp));
@@ -1157,7 +1190,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1157 case CS_DATA_OVERRUN: 1190 case CS_DATA_OVERRUN:
1158 DEBUG2(printk(KERN_INFO 1191 DEBUG2(printk(KERN_INFO
1159 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", 1192 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1160 ha->host_no, cp->device->id, cp->device->lun, comp_status, 1193 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1161 scsi_status)); 1194 scsi_status));
1162 DEBUG2(printk(KERN_INFO 1195 DEBUG2(printk(KERN_INFO
1163 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1196 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,7 +1216,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1183 */ 1216 */
1184 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1217 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1185 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1218 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1186 ha->host_no, cp->device->id, cp->device->lun, 1219 vha->host_no, cp->device->id, cp->device->lun,
1187 cp->serial_number, comp_status, 1220 cp->serial_number, comp_status,
1188 atomic_read(&fcport->state))); 1221 atomic_read(&fcport->state)));
1189 1222
@@ -1194,13 +1227,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1194 */ 1227 */
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1228 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1196 if (atomic_read(&fcport->state) == FCS_ONLINE) 1229 if (atomic_read(&fcport->state) == FCS_ONLINE)
1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1230 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1198 break; 1231 break;
1199 1232
1200 case CS_RESET: 1233 case CS_RESET:
1201 DEBUG2(printk(KERN_INFO 1234 DEBUG2(printk(KERN_INFO
1202 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1235 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1203 ha->host_no, comp_status, scsi_status)); 1236 vha->host_no, comp_status, scsi_status));
1204 1237
1205 cp->result = DID_RESET << 16; 1238 cp->result = DID_RESET << 16;
1206 break; 1239 break;
@@ -1213,7 +1246,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1213 */ 1246 */
1214 DEBUG2(printk(KERN_INFO 1247 DEBUG2(printk(KERN_INFO
1215 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", 1248 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1216 ha->host_no, comp_status, scsi_status)); 1249 vha->host_no, comp_status, scsi_status));
1217 1250
1218 cp->result = DID_RESET << 16; 1251 cp->result = DID_RESET << 16;
1219 break; 1252 break;
@@ -1229,25 +1262,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1229 if (IS_FWI2_CAPABLE(ha)) { 1262 if (IS_FWI2_CAPABLE(ha)) {
1230 DEBUG2(printk(KERN_INFO 1263 DEBUG2(printk(KERN_INFO
1231 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1264 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1232 "0x%x-0x%x\n", ha->host_no, cp->device->channel, 1265 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1233 cp->device->id, cp->device->lun, comp_status, 1266 cp->device->id, cp->device->lun, comp_status,
1234 scsi_status)); 1267 scsi_status));
1235 break; 1268 break;
1236 } 1269 }
1237 DEBUG2(printk(KERN_INFO 1270 DEBUG2(printk(KERN_INFO
1238 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " 1271 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1239 "sflags=%x.\n", ha->host_no, cp->device->channel, 1272 "sflags=%x.\n", vha->host_no, cp->device->channel,
1240 cp->device->id, cp->device->lun, comp_status, scsi_status, 1273 cp->device->id, cp->device->lun, comp_status, scsi_status,
1241 le16_to_cpu(sts->status_flags))); 1274 le16_to_cpu(sts->status_flags)));
1242 1275
1243 /* Check to see if logout occurred. */ 1276 /* Check to see if logout occurred. */
1244 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1277 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1245 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1278 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1246 break; 1279 break;
1247 1280
1248 default: 1281 default:
1249 DEBUG3(printk("scsi(%ld): Error detected (unknown status) " 1282 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1250 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status)); 1283 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1251 qla_printk(KERN_INFO, ha, 1284 qla_printk(KERN_INFO, ha,
1252 "Unknown status detected 0x%x-0x%x.\n", 1285 "Unknown status detected 0x%x-0x%x.\n",
1253 comp_status, scsi_status); 1286 comp_status, scsi_status);
@@ -1257,7 +1290,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1257 } 1290 }
1258 1291
1259 /* Place command on done queue. */ 1292 /* Place command on done queue. */
1260 if (ha->status_srb == NULL) 1293 if (vha->status_srb == NULL)
1261 qla2x00_sp_compl(ha, sp); 1294 qla2x00_sp_compl(ha, sp);
1262} 1295}
1263 1296
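
Worth noting in the hunks above: status_srb, the cursor for sense data that spills into status continuation entries, now hangs off the vport (vha) instead of the shared context. Partially delivered sense data belongs to exactly one SCSI host, and keeping it per-vport prevents two vports sharing a qla_hw_data from clobbering each other's in-flight sense buffers.
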
@@ -1269,10 +1302,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1269 * Extended sense data. 1302 * Extended sense data.
1270 */ 1303 */
1271static void 1304static void
1272qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) 1305qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1273{ 1306{
1274 uint8_t sense_sz = 0; 1307 uint8_t sense_sz = 0;
1275 srb_t *sp = ha->status_srb; 1308 struct qla_hw_data *ha = vha->hw;
1309 srb_t *sp = vha->status_srb;
1276 struct scsi_cmnd *cp; 1310 struct scsi_cmnd *cp;
1277 1311
1278 if (sp != NULL && sp->request_sense_length != 0) { 1312 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1284,7 +1318,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1284 "cmd is NULL: already returned to OS (sp=%p)\n", 1318 "cmd is NULL: already returned to OS (sp=%p)\n",
1285 sp); 1319 sp);
1286 1320
1287 ha->status_srb = NULL; 1321 vha->status_srb = NULL;
1288 return; 1322 return;
1289 } 1323 }
1290 1324
@@ -1305,7 +1339,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1305 1339
1306 /* Place command on done queue. */ 1340 /* Place command on done queue. */
1307 if (sp->request_sense_length == 0) { 1341 if (sp->request_sense_length == 0) {
1308 ha->status_srb = NULL; 1342 vha->status_srb = NULL;
1309 qla2x00_sp_compl(ha, sp); 1343 qla2x00_sp_compl(ha, sp);
1310 } 1344 }
1311 } 1345 }
@@ -1317,10 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1317 * @pkt: Entry pointer 1351 * @pkt: Entry pointer
1318 */ 1352 */
1319static void 1353static void
1320qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) 1354qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1321{ 1355{
1322 srb_t *sp; 1356 srb_t *sp;
1323 1357 struct qla_hw_data *ha = vha->hw;
1358 struct req_que *req = rsp->req;
1324#if defined(QL_DEBUG_LEVEL_2) 1359#if defined(QL_DEBUG_LEVEL_2)
1325 if (pkt->entry_status & RF_INV_E_ORDER) 1360 if (pkt->entry_status & RF_INV_E_ORDER)
1326 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1361 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1339,13 +1374,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1339 1374
1340 /* Validate handle. */ 1375 /* Validate handle. */
1341 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1376 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1342 sp = ha->outstanding_cmds[pkt->handle]; 1377 sp = req->outstanding_cmds[pkt->handle];
1343 else 1378 else
1344 sp = NULL; 1379 sp = NULL;
1345 1380
1346 if (sp) { 1381 if (sp) {
1347 /* Free outstanding command slot. */ 1382 /* Free outstanding command slot. */
1348 ha->outstanding_cmds[pkt->handle] = NULL; 1383 req->outstanding_cmds[pkt->handle] = NULL;
1349 1384
1350 /* Bad payload or header */ 1385 /* Bad payload or header */
1351 if (pkt->entry_status & 1386 if (pkt->entry_status &
@@ -1362,12 +1397,12 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1362 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1397 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1363 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1398 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1364 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1399 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1365 ha->host_no)); 1400 vha->host_no));
1366 qla_printk(KERN_WARNING, ha, 1401 qla_printk(KERN_WARNING, ha,
1367 "Error entry - invalid handle\n"); 1402 "Error entry - invalid handle\n");
1368 1403
1369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1404 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1370 qla2xxx_wake_dpc(ha); 1405 qla2xxx_wake_dpc(vha);
1371 } 1406 }
1372} 1407}
1373 1408
@@ -1377,10 +1412,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1377 * @mb0: Mailbox0 register 1412 * @mb0: Mailbox0 register
1378 */ 1413 */
1379static void 1414static void
1380qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 1415qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1381{ 1416{
1382 uint16_t cnt; 1417 uint16_t cnt;
1383 uint16_t __iomem *wptr; 1418 uint16_t __iomem *wptr;
1419 struct qla_hw_data *ha = vha->hw;
1384 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1420 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1385 1421
1386 /* Load return mailbox registers. */ 1422 /* Load return mailbox registers. */
@@ -1395,10 +1431,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1395 1431
1396 if (ha->mcp) { 1432 if (ha->mcp) {
1397 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1433 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1398 __func__, ha->host_no, ha->mcp->mb[0])); 1434 __func__, vha->host_no, ha->mcp->mb[0]));
1399 } else { 1435 } else {
1400 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1436 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1401 __func__, ha->host_no)); 1437 __func__, vha->host_no));
1402 } 1438 }
1403} 1439}
1404 1440
@@ -1407,30 +1443,33 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1407 * @ha: SCSI driver HA context 1443 * @ha: SCSI driver HA context
1408 */ 1444 */
1409void 1445void
1410qla24xx_process_response_queue(struct scsi_qla_host *ha) 1446qla24xx_process_response_queue(struct rsp_que *rsp)
1411{ 1447{
1412 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1448 struct qla_hw_data *ha = rsp->hw;
1413 struct sts_entry_24xx *pkt; 1449 struct sts_entry_24xx *pkt;
1450 struct scsi_qla_host *vha;
1451
1452 vha = qla2x00_get_rsp_host(rsp);
1414 1453
1415 if (!ha->flags.online) 1454 if (!vha->flags.online)
1416 return; 1455 return;
1417 1456
1418 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 1457 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1419 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr; 1458 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1420 1459
1421 ha->rsp_ring_index++; 1460 rsp->ring_index++;
1422 if (ha->rsp_ring_index == ha->response_q_length) { 1461 if (rsp->ring_index == rsp->length) {
1423 ha->rsp_ring_index = 0; 1462 rsp->ring_index = 0;
1424 ha->response_ring_ptr = ha->response_ring; 1463 rsp->ring_ptr = rsp->ring;
1425 } else { 1464 } else {
1426 ha->response_ring_ptr++; 1465 rsp->ring_ptr++;
1427 } 1466 }
1428 1467
1429 if (pkt->entry_status != 0) { 1468 if (pkt->entry_status != 0) {
1430 DEBUG3(printk(KERN_INFO 1469 DEBUG3(printk(KERN_INFO
1431 "scsi(%ld): Process error entry.\n", ha->host_no)); 1470 "scsi(%ld): Process error entry.\n", vha->host_no));
1432 1471
1433 qla2x00_error_entry(ha, (sts_entry_t *) pkt); 1472 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1434 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1473 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1435 wmb(); 1474 wmb();
1436 continue; 1475 continue;
@@ -1438,13 +1477,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1438 1477
1439 switch (pkt->entry_type) { 1478 switch (pkt->entry_type) {
1440 case STATUS_TYPE: 1479 case STATUS_TYPE:
1441 qla2x00_status_entry(ha, pkt); 1480 qla2x00_status_entry(vha, rsp, pkt);
1442 break; 1481 break;
1443 case STATUS_CONT_TYPE: 1482 case STATUS_CONT_TYPE:
1444 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 1483 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1445 break; 1484 break;
1446 case VP_RPT_ID_IOCB_TYPE: 1485 case VP_RPT_ID_IOCB_TYPE:
1447 qla24xx_report_id_acquisition(ha, 1486 qla24xx_report_id_acquisition(vha,
1448 (struct vp_rpt_id_entry_24xx *)pkt); 1487 (struct vp_rpt_id_entry_24xx *)pkt);
1449 break; 1488 break;
1450 default: 1489 default:
@@ -1452,7 +1491,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1452 DEBUG4(printk(KERN_WARNING 1491 DEBUG4(printk(KERN_WARNING
1453 "scsi(%ld): Received unknown response pkt type %x " 1492 "scsi(%ld): Received unknown response pkt type %x "
1454 "entry status=%x.\n", 1493 "entry status=%x.\n",
1455 ha->host_no, pkt->entry_type, pkt->entry_status)); 1494 vha->host_no, pkt->entry_type, pkt->entry_status));
1456 break; 1495 break;
1457 } 1496 }
1458 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1497 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1460,14 +1499,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1460 } 1499 }
1461 1500
1462 /* Adjust ring index */ 1501 /* Adjust ring index */
1463 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index); 1502 ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1464} 1503}
1465 1504
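
The out-pointer update above is now dispatched through ha->isp_ops->wrt_rsp_reg() instead of a hard-coded WRT_REG_DWORD, because the register's location depends on the queue once multiqueue is in play. The two implementations, qla24xx_wrt_rsp_reg() and qla25xx_wrt_rsp_reg(), appear at the end of this file's diff; the 25xx variant indexes a per-queue register page inside mqiobase.
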
1466static void 1505static void
1467qla2xxx_check_risc_status(scsi_qla_host_t *ha) 1506qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1468{ 1507{
1469 int rval; 1508 int rval;
1470 uint32_t cnt; 1509 uint32_t cnt;
1510 struct qla_hw_data *ha = vha->hw;
1471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1472 1512
1473 if (!IS_QLA25XX(ha)) 1513 if (!IS_QLA25XX(ha))
@@ -1521,25 +1561,29 @@ done:
1521irqreturn_t 1561irqreturn_t
1522qla24xx_intr_handler(int irq, void *dev_id) 1562qla24xx_intr_handler(int irq, void *dev_id)
1523{ 1563{
1524 scsi_qla_host_t *ha; 1564 scsi_qla_host_t *vha;
1565 struct qla_hw_data *ha;
1525 struct device_reg_24xx __iomem *reg; 1566 struct device_reg_24xx __iomem *reg;
1526 int status; 1567 int status;
1527 unsigned long iter; 1568 unsigned long iter;
1528 uint32_t stat; 1569 uint32_t stat;
1529 uint32_t hccr; 1570 uint32_t hccr;
1530 uint16_t mb[4]; 1571 uint16_t mb[4];
1572 struct rsp_que *rsp;
1531 1573
1532 ha = (scsi_qla_host_t *) dev_id; 1574 rsp = (struct rsp_que *) dev_id;
1533 if (!ha) { 1575 if (!rsp) {
1534 printk(KERN_INFO 1576 printk(KERN_INFO
1535 "%s(): NULL host pointer\n", __func__); 1577 "%s(): NULL response queue pointer\n", __func__);
1536 return IRQ_NONE; 1578 return IRQ_NONE;
1537 } 1579 }
1538 1580
1581 ha = rsp->hw;
1539 reg = &ha->iobase->isp24; 1582 reg = &ha->iobase->isp24;
1540 status = 0; 1583 status = 0;
1541 1584
1542 spin_lock(&ha->hardware_lock); 1585 spin_lock(&ha->hardware_lock);
1586 vha = qla2x00_get_rsp_host(rsp);
1543 for (iter = 50; iter--; ) { 1587 for (iter = 50; iter--; ) {
1544 stat = RD_REG_DWORD(&reg->host_status); 1588 stat = RD_REG_DWORD(&reg->host_status);
1545 if (stat & HSRX_RISC_PAUSED) { 1589 if (stat & HSRX_RISC_PAUSED) {
@@ -1547,7 +1591,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1547 break; 1591 break;
1548 1592
1549 if (ha->hw_event_pause_errors == 0) 1593 if (ha->hw_event_pause_errors == 0)
1550 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1551 0, MSW(stat), LSW(stat)); 1595 0, MSW(stat), LSW(stat));
1552 else if (ha->hw_event_pause_errors < 0xffffffff) 1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1553 ha->hw_event_pause_errors++; 1597 ha->hw_event_pause_errors++;
@@ -1557,10 +1601,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
1557 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1558 "Dumping firmware!\n", hccr); 1602 "Dumping firmware!\n", hccr);
1559 1603
1560 qla2xxx_check_risc_status(ha); 1604 qla2xxx_check_risc_status(vha);
1561 1605
1562 ha->isp_ops->fw_dump(ha, 1); 1606 ha->isp_ops->fw_dump(vha, 1);
1563 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1607 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1564 break; 1608 break;
1565 } else if ((stat & HSRX_RISC_INT) == 0) 1609 } else if ((stat & HSRX_RISC_INT) == 0)
1566 break; 1610 break;
@@ -1570,7 +1614,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1570 case 0x2: 1614 case 0x2:
1571 case 0x10: 1615 case 0x10:
1572 case 0x11: 1616 case 0x11:
1573 qla24xx_mbx_completion(ha, MSW(stat)); 1617 qla24xx_mbx_completion(vha, MSW(stat));
1574 status |= MBX_INTERRUPT; 1618 status |= MBX_INTERRUPT;
1575 1619
1576 break; 1620 break;
@@ -1579,15 +1623,16 @@ qla24xx_intr_handler(int irq, void *dev_id)
1579 mb[1] = RD_REG_WORD(&reg->mailbox1); 1623 mb[1] = RD_REG_WORD(&reg->mailbox1);
1580 mb[2] = RD_REG_WORD(&reg->mailbox2); 1624 mb[2] = RD_REG_WORD(&reg->mailbox2);
1581 mb[3] = RD_REG_WORD(&reg->mailbox3); 1625 mb[3] = RD_REG_WORD(&reg->mailbox3);
1582 qla2x00_async_event(ha, mb); 1626 qla2x00_async_event(vha, rsp, mb);
1583 break; 1627 break;
1584 case 0x13: 1628 case 0x13:
1585 qla24xx_process_response_queue(ha); 1629 case 0x14:
1630 qla24xx_process_response_queue(rsp);
1586 break; 1631 break;
1587 default: 1632 default:
1588 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1633 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1589 "(%d).\n", 1634 "(%d).\n",
1590 ha->host_no, stat & 0xff)); 1635 vha->host_no, stat & 0xff));
1591 break; 1636 break;
1592 } 1637 }
1593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1607,15 +1652,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
1607static irqreturn_t 1652static irqreturn_t
1608qla24xx_msix_rsp_q(int irq, void *dev_id) 1653qla24xx_msix_rsp_q(int irq, void *dev_id)
1609{ 1654{
1610 scsi_qla_host_t *ha; 1655 struct qla_hw_data *ha;
1656 struct rsp_que *rsp;
1611 struct device_reg_24xx __iomem *reg; 1657 struct device_reg_24xx __iomem *reg;
1612 1658
1613 ha = dev_id; 1659 rsp = (struct rsp_que *) dev_id;
1660 if (!rsp) {
1661 printk(KERN_INFO
1662 "%s(): NULL response queue pointer\n", __func__);
1663 return IRQ_NONE;
1664 }
1665 ha = rsp->hw;
1614 reg = &ha->iobase->isp24; 1666 reg = &ha->iobase->isp24;
1615 1667
1616 spin_lock_irq(&ha->hardware_lock); 1668 spin_lock_irq(&ha->hardware_lock);
1617 1669
1618 qla24xx_process_response_queue(ha); 1670 qla24xx_process_response_queue(rsp);
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1671 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1620 1672
1621 spin_unlock_irq(&ha->hardware_lock); 1673 spin_unlock_irq(&ha->hardware_lock);
@@ -1624,20 +1676,64 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1624} 1676}
1625 1677
1626static irqreturn_t 1678static irqreturn_t
1679qla25xx_msix_rsp_q(int irq, void *dev_id)
1680{
1681 struct qla_hw_data *ha;
1682 struct rsp_que *rsp;
1683 struct device_reg_24xx __iomem *reg;
1684 uint16_t msix_disabled_hccr = 0;
1685
1686 rsp = (struct rsp_que *) dev_id;
1687 if (!rsp) {
1688 printk(KERN_INFO
1689 "%s(): NULL response queue pointer\n", __func__);
1690 return IRQ_NONE;
1691 }
1692 ha = rsp->hw;
1693 reg = &ha->iobase->isp24;
1694
1695 spin_lock_irq(&ha->hardware_lock);
1696
1697 msix_disabled_hccr = rsp->options;
1698 if (!rsp->id)
1699 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1700 else
1701 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1702
1703 qla24xx_process_response_queue(rsp);
1704
1705 if (!msix_disabled_hccr)
1706 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1707
1708 spin_unlock_irq(&ha->hardware_lock);
1709
1710 return IRQ_HANDLED;
1711}
1712
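
qla25xx_msix_rsp_q() is new with this patch: a multiqueue response queue can be created with an option asking the firmware to auto-disable the interrupt, in which case the handler must skip the HCCRX_CLR_RISC_INT write. One oddity worth flagging: msix_disabled_hccr is declared uint16_t, so masking it with __constant_cpu_to_le32(BIT_22) truncates to zero and the base queue's option bit can never be seen here (BIT_6, the per-queue bit, does fit in 16 bits). That reads like a latent bug in the patch rather than intent.
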
1713static irqreturn_t
1627qla24xx_msix_default(int irq, void *dev_id) 1714qla24xx_msix_default(int irq, void *dev_id)
1628{ 1715{
1629 scsi_qla_host_t *ha; 1716 scsi_qla_host_t *vha;
1717 struct qla_hw_data *ha;
1718 struct rsp_que *rsp;
1630 struct device_reg_24xx __iomem *reg; 1719 struct device_reg_24xx __iomem *reg;
1631 int status; 1720 int status;
1632 uint32_t stat; 1721 uint32_t stat;
1633 uint32_t hccr; 1722 uint32_t hccr;
1634 uint16_t mb[4]; 1723 uint16_t mb[4];
1635 1724
1636 ha = dev_id; 1725 rsp = (struct rsp_que *) dev_id;
1726 if (!rsp) {
1727 DEBUG(printk(
1728 "%s(): NULL response queue pointer\n", __func__));
1729 return IRQ_NONE;
1730 }
1731 ha = rsp->hw;
1637 reg = &ha->iobase->isp24; 1732 reg = &ha->iobase->isp24;
1638 status = 0; 1733 status = 0;
1639 1734
1640 spin_lock_irq(&ha->hardware_lock); 1735 spin_lock_irq(&ha->hardware_lock);
1736 vha = qla2x00_get_rsp_host(rsp);
1641 do { 1737 do {
1642 stat = RD_REG_DWORD(&reg->host_status); 1738 stat = RD_REG_DWORD(&reg->host_status);
1643 if (stat & HSRX_RISC_PAUSED) { 1739 if (stat & HSRX_RISC_PAUSED) {
@@ -1645,7 +1741,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1645 break; 1741 break;
1646 1742
1647 if (ha->hw_event_pause_errors == 0) 1743 if (ha->hw_event_pause_errors == 0)
1648 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1649 0, MSW(stat), LSW(stat)); 1745 0, MSW(stat), LSW(stat));
1650 else if (ha->hw_event_pause_errors < 0xffffffff) 1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1651 ha->hw_event_pause_errors++; 1747 ha->hw_event_pause_errors++;
@@ -1655,10 +1751,10 @@ qla24xx_msix_default(int irq, void *dev_id)
1655 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1656 "Dumping firmware!\n", hccr); 1752 "Dumping firmware!\n", hccr);
1657 1753
1658 qla2xxx_check_risc_status(ha); 1754 qla2xxx_check_risc_status(vha);
1659 1755
1660 ha->isp_ops->fw_dump(ha, 1); 1756 ha->isp_ops->fw_dump(vha, 1);
1661 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1662 break; 1758 break;
1663 } else if ((stat & HSRX_RISC_INT) == 0) 1759 } else if ((stat & HSRX_RISC_INT) == 0)
1664 break; 1760 break;
@@ -1668,7 +1764,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1668 case 0x2: 1764 case 0x2:
1669 case 0x10: 1765 case 0x10:
1670 case 0x11: 1766 case 0x11:
1671 qla24xx_mbx_completion(ha, MSW(stat)); 1767 qla24xx_mbx_completion(vha, MSW(stat));
1672 status |= MBX_INTERRUPT; 1768 status |= MBX_INTERRUPT;
1673 1769
1674 break; 1770 break;
@@ -1677,15 +1773,16 @@ qla24xx_msix_default(int irq, void *dev_id)
1677 mb[1] = RD_REG_WORD(&reg->mailbox1); 1773 mb[1] = RD_REG_WORD(&reg->mailbox1);
1678 mb[2] = RD_REG_WORD(&reg->mailbox2); 1774 mb[2] = RD_REG_WORD(&reg->mailbox2);
1679 mb[3] = RD_REG_WORD(&reg->mailbox3); 1775 mb[3] = RD_REG_WORD(&reg->mailbox3);
1680 qla2x00_async_event(ha, mb); 1776 qla2x00_async_event(vha, rsp, mb);
1681 break; 1777 break;
1682 case 0x13: 1778 case 0x13:
1683 qla24xx_process_response_queue(ha); 1779 case 0x14:
1780 qla24xx_process_response_queue(rsp);
1684 break; 1781 break;
1685 default: 1782 default:
1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1783 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1687 "(%d).\n", 1784 "(%d).\n",
1688 ha->host_no, stat & 0xff)); 1785 vha->host_no, stat & 0xff));
1689 break; 1786 break;
1690 } 1787 }
1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1788 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1710,70 +1807,138 @@ struct qla_init_msix_entry {
1710 irq_handler_t handler; 1807 irq_handler_t handler;
1711}; 1808};
1712 1809
1713static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { 1810static struct qla_init_msix_entry base_queue = {
1714 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, 1811 .entry = 0,
1715 "qla2xxx (default)", qla24xx_msix_default }, 1812 .index = 0,
1813 .name = "qla2xxx (default)",
1814 .handler = qla24xx_msix_default,
1815};
1816
1817static struct qla_init_msix_entry base_rsp_queue = {
1818 .entry = 1,
1819 .index = 1,
1820 .name = "qla2xxx (rsp_q)",
1821 .handler = qla24xx_msix_rsp_q,
1822};
1716 1823
1717 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, 1824static struct qla_init_msix_entry multi_rsp_queue = {
1718 "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 1825 .entry = 1,
1826 .index = 1,
1827 .name = "qla2xxx (multi_q)",
1828 .handler = qla25xx_msix_rsp_q,
1719}; 1829};
1720 1830
1721static void 1831static void
1722qla24xx_disable_msix(scsi_qla_host_t *ha) 1832qla24xx_disable_msix(struct qla_hw_data *ha)
1723{ 1833{
1724 int i; 1834 int i;
1725 struct qla_msix_entry *qentry; 1835 struct qla_msix_entry *qentry;
1726 1836
1727 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1837 for (i = 0; i < ha->msix_count; i++) {
1728 qentry = &ha->msix_entries[imsix_entries[i].index]; 1838 qentry = &ha->msix_entries[i];
1729 if (qentry->have_irq) 1839 if (qentry->have_irq)
1730 free_irq(qentry->msix_vector, ha); 1840 free_irq(qentry->vector, qentry->rsp);
1731 } 1841 }
1732 pci_disable_msix(ha->pdev); 1842 pci_disable_msix(ha->pdev);
1843 kfree(ha->msix_entries);
1844 ha->msix_entries = NULL;
1845 ha->flags.msix_enabled = 0;
1733} 1846}
1734 1847
1735static int 1848static int
1736qla24xx_enable_msix(scsi_qla_host_t *ha) 1849qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1737{ 1850{
1738 int i, ret; 1851 int i, ret;
1739 struct msix_entry entries[QLA_MSIX_ENTRIES]; 1852 struct msix_entry *entries;
1740 struct qla_msix_entry *qentry; 1853 struct qla_msix_entry *qentry;
1854 struct qla_init_msix_entry *msix_queue;
1741 1855
1742 for (i = 0; i < QLA_MSIX_ENTRIES; i++) 1856 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1743 entries[i].entry = imsix_entries[i].entry; 1857 GFP_KERNEL);
1858 if (!entries)
1859 return -ENOMEM;
1744 1860
1745 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); 1861 for (i = 0; i < ha->msix_count; i++)
1862 entries[i].entry = i;
1863
1864 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1746 if (ret) { 1865 if (ret) {
1747 qla_printk(KERN_WARNING, ha, 1866 qla_printk(KERN_WARNING, ha,
1748 "MSI-X: Failed to enable support -- %d/%d\n", 1867 "MSI-X: Failed to enable support -- %d/%d\n"
1749 QLA_MSIX_ENTRIES, ret); 1868 " Retrying with %d vectors\n", ha->msix_count, ret, ret);
1869 ha->msix_count = ret;
1870 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1871 if (ret) {
1872 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1873 " support, giving up -- %d/%d\n",
1874 ha->msix_count, ret);
1875 goto msix_out;
1876 }
1877 ha->max_queues = ha->msix_count - 1;
1878 }
1879 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1880 ha->msix_count, GFP_KERNEL);
1881 if (!ha->msix_entries) {
1882 ret = -ENOMEM;
1750 goto msix_out; 1883 goto msix_out;
1751 } 1884 }
1752 ha->flags.msix_enabled = 1; 1885 ha->flags.msix_enabled = 1;
1753 1886
1754 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1887 for (i = 0; i < ha->msix_count; i++) {
1755 qentry = &ha->msix_entries[imsix_entries[i].index]; 1888 qentry = &ha->msix_entries[i];
1756 qentry->msix_vector = entries[i].vector; 1889 qentry->vector = entries[i].vector;
1757 qentry->msix_entry = entries[i].entry; 1890 qentry->entry = entries[i].entry;
1758 qentry->have_irq = 0; 1891 qentry->have_irq = 0;
1759 ret = request_irq(qentry->msix_vector, 1892 qentry->rsp = NULL;
1760 imsix_entries[i].handler, 0, imsix_entries[i].name, ha); 1893 }
1761 if (ret) { 1894
1762 qla_printk(KERN_WARNING, ha, 1895 /* Enable MSI-X for AENs for queue 0 */
1763 "MSI-X: Unable to register handler -- %x/%d.\n", 1896 qentry = &ha->msix_entries[0];
1764 imsix_entries[i].index, ret); 1897 ret = request_irq(qentry->vector, base_queue.handler, 0,
1765 qla24xx_disable_msix(ha); 1898 base_queue.name, rsp);
1766 goto msix_out; 1899 if (ret) {
1767 } 1900 qla_printk(KERN_WARNING, ha,
1768 qentry->have_irq = 1; 1901 "MSI-X: Unable to register handler -- %x/%d.\n",
1902 qentry->vector, ret);
1903 qla24xx_disable_msix(ha);
1904 goto msix_out;
1769 } 1905 }
1906 qentry->have_irq = 1;
1907 qentry->rsp = rsp;
1908
1909 /* Enable MSI-X vector for response queue update for queue 0 */
1910 if (ha->max_queues > 1 && ha->mqiobase) {
1911 ha->mqenable = 1;
1912 msix_queue = &multi_rsp_queue;
1913 qla_printk(KERN_INFO, ha,
1914 "MQ enabled, Number of Queue Resources: %d \n",
1915 ha->max_queues);
1916 } else {
1917 ha->mqenable = 0;
1918 msix_queue = &base_rsp_queue;
1919 }
1920
1921 qentry = &ha->msix_entries[1];
1922 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1923 msix_queue->name, rsp);
1924 if (ret) {
1925 qla_printk(KERN_WARNING, ha,
1926 "MSI-X: Unable to register handler -- %x/%d.\n",
1927 qentry->vector, ret);
1928 qla24xx_disable_msix(ha);
1929 ha->mqenable = 0;
1930 goto msix_out;
1931 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1770 1934
1771msix_out: 1935msix_out:
1936 kfree(entries);
1772 return ret; 1937 return ret;
1773} 1938}
1774 1939
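
The MSI-X setup is the largest rework in this file: the fixed two-entry table becomes arrays sized by ha->msix_count, vector 0 is reserved for mailbox completions and AENs, and vector 1 services either the single base response queue or, when mqiobase is mapped and more than one queue is possible, the shared multiqueue handler. Note also the retry after the first pci_enable_msix() call: under the API contract of this era, a positive return is the number of vectors the platform could actually grant, and the driver asks again with that smaller count. A reduced model of the pattern (kernel code, same-era pci_enable_msix() semantics assumed):

	#include <linux/pci.h>

	static int enable_msix_with_retry(struct pci_dev *pdev,
	    struct msix_entry *entries, int *count)
	{
		int ret = pci_enable_msix(pdev, entries, *count);

		if (ret > 0) {		/* only 'ret' vectors available */
			*count = ret;
			ret = pci_enable_msix(pdev, entries, *count);
		}
		return ret;		/* 0 on success, < 0 on failure */
	}
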
1775int 1940int
1776qla2x00_request_irqs(scsi_qla_host_t *ha) 1941qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1777{ 1942{
1778 int ret; 1943 int ret;
1779 device_reg_t __iomem *reg = ha->iobase; 1944 device_reg_t __iomem *reg = ha->iobase;
@@ -1782,11 +1947,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1782 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1783 goto skip_msix; 1948 goto skip_msix;
1784 1949
1785 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || 1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1786 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 1951 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1787 DEBUG2(qla_printk(KERN_WARNING, ha, 1952 DEBUG2(qla_printk(KERN_WARNING, ha,
1788 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 1953 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1789 ha->chip_revision, ha->fw_attributes)); 1954 ha->pdev->revision, ha->fw_attributes));
1790 1955
1791 goto skip_msix; 1956 goto skip_msix;
1792 } 1957 }
@@ -1803,7 +1968,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1803 goto skip_msi; 1968 goto skip_msi;
1804 } 1969 }
1805 1970
1806 ret = qla24xx_enable_msix(ha); 1971 ret = qla24xx_enable_msix(ha, rsp);
1807 if (!ret) { 1972 if (!ret) {
1808 DEBUG2(qla_printk(KERN_INFO, ha, 1973 DEBUG2(qla_printk(KERN_INFO, ha,
1809 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1974 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1825,7 +1990,7 @@ skip_msix:
1825skip_msi: 1990skip_msi:
1826 1991
1827 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1992 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1828 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1993 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1829 if (ret) { 1994 if (ret) {
1830 qla_printk(KERN_WARNING, ha, 1995 qla_printk(KERN_WARNING, ha,
1831 "Failed to reserve interrupt %d already in use.\n", 1996 "Failed to reserve interrupt %d already in use.\n",
@@ -1833,10 +1998,8 @@ skip_msi:
1833 goto fail; 1998 goto fail;
1834 } 1999 }
1835 ha->flags.inta_enabled = 1; 2000 ha->flags.inta_enabled = 1;
1836 ha->host->irq = ha->pdev->irq;
1837clear_risc_ints: 2001clear_risc_ints:
1838 2002
1839 ha->isp_ops->disable_intrs(ha);
1840 spin_lock_irq(&ha->hardware_lock); 2003 spin_lock_irq(&ha->hardware_lock);
1841 if (IS_FWI2_CAPABLE(ha)) { 2004 if (IS_FWI2_CAPABLE(ha)) {
1842 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1853,13 +2016,74 @@ fail:
1853} 2016}
1854 2017
1855void 2018void
1856qla2x00_free_irqs(scsi_qla_host_t *ha) 2019qla2x00_free_irqs(scsi_qla_host_t *vha)
1857{ 2020{
2021 struct qla_hw_data *ha = vha->hw;
2022 struct rsp_que *rsp = ha->rsp_q_map[0];
1858 2023
1859 if (ha->flags.msix_enabled) 2024 if (ha->flags.msix_enabled)
1860 qla24xx_disable_msix(ha); 2025 qla24xx_disable_msix(ha);
1861 else if (ha->flags.inta_enabled) { 2026 else if (ha->flags.inta_enabled) {
1862 free_irq(ha->host->irq, ha); 2027 free_irq(ha->pdev->irq, rsp);
1863 pci_disable_msi(ha->pdev); 2028 pci_disable_msi(ha->pdev);
1864 } 2029 }
1865} 2030}
2031
2032static struct scsi_qla_host *
2033qla2x00_get_rsp_host(struct rsp_que *rsp)
2034{
2035 srb_t *sp;
2036 struct qla_hw_data *ha = rsp->hw;
2037 struct scsi_qla_host *vha = NULL;
2038 struct sts_entry_24xx *pkt;
2039 struct req_que *req;
2040
2041 if (rsp->id) {
2042 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2043 req = rsp->req;
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp)
2047 vha = sp->vha;
2048 }
2049 }
2050 if (!vha)
2051 /* handle it in base queue */
2052 vha = pci_get_drvdata(ha->pdev);
2053
2054 return vha;
2055}
2056
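
qla2x00_get_rsp_host() above is how a completion on a shared response queue gets attributed to a vport: for a non-base queue it peeks at the pending status entry, resolves the handle through the queue's request side, and takes sp->vha from the owning command; anything unresolvable, and all base-queue traffic, is charged to the physical port's host fetched from PCI drvdata. This is what lets the interrupt paths earlier in the file drop their scsi_qla_host argument.
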
2057int qla25xx_request_irq(struct rsp_que *rsp)
2058{
2059 struct qla_hw_data *ha = rsp->hw;
2060 struct qla_init_msix_entry *intr = &multi_rsp_queue;
2061 struct qla_msix_entry *msix = rsp->msix;
2062 int ret;
2063
2064 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2065 if (ret) {
2066 qla_printk(KERN_WARNING, ha,
2067 "MSI-X: Unable to register handler -- %x/%d.\n",
2068 msix->vector, ret);
2069 return ret;
2070 }
2071 msix->have_irq = 1;
2072 msix->rsp = rsp;
2073 return ret;
2074}
2075
2076void
2077qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2078{
2079 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2080 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2081}
2082
2083void
2084qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2085{
2086 device_reg_t __iomem *reg = (void *) ha->iobase;
2087 WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2088}
2089
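
The two wrt_rsp_reg() variants close the loop on the isp_ops indirection introduced earlier: the 24xx flavor writes the single rsp_q_out register in iobase, while the 25xx flavor selects a per-queue page inside the MQ BAR. The address math, shown with a placeholder stride (the real QLA_QUE_PAGE is defined in qla_def.h):

	#define QLA_QUE_PAGE 0x1000	/* placeholder stride */

	static char *que_regs(char *mqiobase, unsigned short id)
	{
		/* Queue N's registers sit N pages into the MQ window. */
		return mqiobase + (unsigned long)QLA_QUE_PAGE * id;
	}
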
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3402746ec128..a99976f5fabd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -29,7 +29,7 @@
29 * Kernel context. 29 * Kernel context.
30 */ 30 */
31static int 31static int
32qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) 32qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
33{ 33{
34 int rval; 34 int rval;
35 unsigned long flags = 0; 35 unsigned long flags = 0;
@@ -42,15 +42,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
42 uint32_t cnt; 42 uint32_t cnt;
43 uint32_t mboxes; 43 uint32_t mboxes;
44 unsigned long wait_time; 44 unsigned long wait_time;
45 scsi_qla_host_t *ha = to_qla_parent(pvha); 45 struct qla_hw_data *ha = vha->hw;
46 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
46 47
47 reg = ha->iobase; 48 reg = ha->iobase;
48 io_lock_on = ha->flags.init_done; 49 io_lock_on = base_vha->flags.init_done;
49 50
50 rval = QLA_SUCCESS; 51 rval = QLA_SUCCESS;
51 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 52 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
52 53
53 DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no)); 54 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
54 55
55 /* 56 /*
56 * Wait for active mailbox commands to finish by waiting at most tov 57 * Wait for active mailbox commands to finish by waiting at most tov
@@ -62,7 +63,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
62 mcp->tov * HZ)) { 63 mcp->tov * HZ)) {
63 /* Timeout occurred. Return error. */ 64 /* Timeout occurred. Return error. */
64 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 65 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
65 "Exiting.\n", __func__, ha->host_no)); 66 "Exiting.\n", __func__, base_vha->host_no));
66 return QLA_FUNCTION_TIMEOUT; 67 return QLA_FUNCTION_TIMEOUT;
67 } 68 }
68 } 69 }
@@ -72,7 +73,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
72 ha->mcp = mcp; 73 ha->mcp = mcp;
73 74
74 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 75 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
75 ha->host_no, mcp->mb[0])); 76 base_vha->host_no, mcp->mb[0]));
76 77
77 spin_lock_irqsave(&ha->hardware_lock, flags); 78 spin_lock_irqsave(&ha->hardware_lock, flags);
78 79
@@ -100,15 +101,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
100 101
101#if defined(QL_DEBUG_LEVEL_1) 102#if defined(QL_DEBUG_LEVEL_1)
102 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 103 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
103 __func__, ha->host_no); 104 __func__, base_vha->host_no);
104 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 105 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
105 printk("\n"); 106 printk("\n");
106 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 107 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
107 printk("\n"); 108 printk("\n");
108 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 109 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
109 printk("\n"); 110 printk("\n");
110 printk("%s(%ld): I/O address = %p.\n", __func__, ha->host_no, optr); 111 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
111 qla2x00_dump_regs(ha); 112 optr);
113 qla2x00_dump_regs(base_vha);
112#endif 114#endif
113 115
114 /* Issue set host interrupt command to send cmd out. */ 116 /* Issue set host interrupt command to send cmd out. */
@@ -117,7 +119,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
117 119
118 /* Unlock mbx registers and wait for interrupt */ 120 /* Unlock mbx registers and wait for interrupt */
119 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 121 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
120 "jiffies=%lx.\n", __func__, ha->host_no, jiffies)); 122 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
121 123
122 /* Wait for mbx cmd completion until timeout */ 124 /* Wait for mbx cmd completion until timeout */
123 125
@@ -137,7 +139,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
137 139
138 } else { 140 } else {
139 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 141 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
140 ha->host_no, command)); 142 base_vha->host_no, command));
141 143
142 if (IS_FWI2_CAPABLE(ha)) 144 if (IS_FWI2_CAPABLE(ha))
143 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 145 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -151,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
151 break; 153 break;
152 154
153 /* Check for pending interrupts. */ 155 /* Check for pending interrupts. */
154 qla2x00_poll(ha); 156 qla2x00_poll(ha->rsp_q_map[0]);
155 157
156 if (command != MBC_LOAD_RISC_RAM_EXTENDED && 158 if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
157 !ha->flags.mbox_int) 159 !ha->flags.mbox_int)
@@ -164,7 +166,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
164 uint16_t *iptr2; 166 uint16_t *iptr2;
165 167
166 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 168 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
167 ha->host_no, command)); 169 base_vha->host_no, command));
168 170
169 /* Got interrupt. Clear the flag. */ 171 /* Got interrupt. Clear the flag. */
170 ha->flags.mbox_int = 0; 172 ha->flags.mbox_int = 0;
@@ -200,12 +202,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
200 ictrl = RD_REG_WORD(&reg->isp.ictrl); 202 ictrl = RD_REG_WORD(&reg->isp.ictrl);
201 } 203 }
202 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 204 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
203 __func__, ha->host_no, command); 205 __func__, base_vha->host_no, command);
204 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 206 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
205 ha->host_no, ictrl, jiffies); 207 base_vha->host_no, ictrl, jiffies);
206 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 208 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
207 ha->host_no, mb0); 209 base_vha->host_no, mb0);
208 qla2x00_dump_regs(ha); 210 qla2x00_dump_regs(base_vha);
209#endif 211#endif
210 212
211 rval = QLA_FUNCTION_TIMEOUT; 213 rval = QLA_FUNCTION_TIMEOUT;
@@ -218,10 +220,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
218 220
219 if (abort_active || !io_lock_on) { 221 if (abort_active || !io_lock_on) {
220 DEBUG11(printk("%s(%ld): checking for additional resp " 222 DEBUG11(printk("%s(%ld): checking for additional resp "
221 "interrupt.\n", __func__, ha->host_no)); 223 "interrupt.\n", __func__, base_vha->host_no));
222 224
223 /* polling mode for non isp_abort commands. */ 225 /* polling mode for non isp_abort commands. */
224 qla2x00_poll(ha); 226 qla2x00_poll(ha->rsp_q_map[0]);
225 } 227 }
226 228
227 if (rval == QLA_FUNCTION_TIMEOUT && 229 if (rval == QLA_FUNCTION_TIMEOUT &&
@@ -229,35 +231,37 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
229 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 231 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
230 /* not in dpc. schedule it for dpc to take over. */ 232 /* not in dpc. schedule it for dpc to take over. */
231 DEBUG(printk("%s(%ld): timeout schedule " 233 DEBUG(printk("%s(%ld): timeout schedule "
232 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__,
235 base_vha->host_no));
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 236 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 237 "isp_abort_needed.\n", __func__,
238 base_vha->host_no));
235 qla_printk(KERN_WARNING, ha, 239 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occurred. Scheduling ISP " 240 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 241 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 242 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 243 qla2xxx_wake_dpc(vha);
240 } else if (!abort_active) { 244 } else if (!abort_active) {
241 /* call abort directly since we are in the DPC thread */ 245 /* call abort directly since we are in the DPC thread */
242 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 246 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
243 __func__, ha->host_no)); 247 __func__, base_vha->host_no));
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 248 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 249 "abort_isp\n", __func__, base_vha->host_no));
246 qla_printk(KERN_WARNING, ha, 250 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occurred. Issuing ISP " 251 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 252 "abort.\n");
249 253
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 254 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
251 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 255 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
252 if (qla2x00_abort_isp(ha)) { 256 if (qla2x00_abort_isp(base_vha)) {
253 /* Failed. retry later. */ 257 /* Failed. retry later. */
254 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 258 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
255 } 259 }
256 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 260 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
257 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__, 261 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
258 ha->host_no)); 262 base_vha->host_no));
259 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n", 263 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
260 __func__, ha->host_no)); 264 __func__, base_vha->host_no));
261 } 265 }
262 } 266 }
263 267
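
The hunks above carry the core of this conversion: qla2x00_mailbox_command() now receives a scsi_qla_host_t that may be any virtual port (vha), reaches shared adapter state through vha->hw (struct qla_hw_data), polls the default response queue via ha->rsp_q_map[0], and aims recovery work (dpc_flags, ISP abort, register dumps) at the base physical port (base_vha). A minimal user-space sketch of that ownership split, with stub types standing in for the driver's headers; the base_vha back-pointer below is illustrative (the driver itself recovers the base port from the PCI device's driver data):

    #include <stdio.h>

    struct scsi_qla_host;

    /* Adapter-wide state shared by every virtual port on one HBA. */
    struct qla_hw_data {
        int mbox_busy;                    /* one mailbox command at a time */
        struct scsi_qla_host *base_vha;   /* illustrative: the physical port */
    };

    /* Per-(virtual-)port state. */
    struct scsi_qla_host {
        long host_no;
        unsigned long dpc_flags;
        struct qla_hw_data *hw;
    };

    /* Any port may issue the command; recovery targets the base port. */
    static int example_mbx_command(struct scsi_qla_host *vha)
    {
        struct qla_hw_data *ha = vha->hw;

        if (ha->mbox_busy)
            return -1;
        printf("issued on port %ld; a timeout would flag port %ld\n",
               vha->host_no, ha->base_vha->host_no);
        return 0;
    }
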
@@ -267,24 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
267 271
268 if (rval) { 272 if (rval) {
269 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 273 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
270 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no, 274 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
271 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 275 mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
272 } else { 276 } else {
273 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 277 DEBUG11(printk("%s(%ld): done.\n", __func__,
278 base_vha->host_no));
274 } 279 }
275 280
276 return rval; 281 return rval;
277 } 282 }
278 283
279 int 284 int
280 qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr, 285 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
281 uint32_t risc_code_size) 286 uint32_t risc_code_size)
282 { 287 {
283 int rval; 288 int rval;
289 struct qla_hw_data *ha = vha->hw;
284 mbx_cmd_t mc; 290 mbx_cmd_t mc;
285 mbx_cmd_t *mcp = &mc; 291 mbx_cmd_t *mcp = &mc;
286 292
287 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 293 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
288 294
289 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 295 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
290 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 296 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -312,13 +318,13 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
312 mcp->in_mb = MBX_0; 318 mcp->in_mb = MBX_0;
313 mcp->tov = MBX_TOV_SECONDS; 319 mcp->tov = MBX_TOV_SECONDS;
314 mcp->flags = 0; 320 mcp->flags = 0;
315 rval = qla2x00_mailbox_command(ha, mcp); 321 rval = qla2x00_mailbox_command(vha, mcp);
316 322
317 if (rval != QLA_SUCCESS) { 323 if (rval != QLA_SUCCESS) {
318 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 324 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
319 ha->host_no, rval, mcp->mb[0])); 325 vha->host_no, rval, mcp->mb[0]));
320 } else { 326 } else {
321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 327 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
322 } 328 }
323 329
324 return rval; 330 return rval;
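
The same mechanical pattern repeats through the rest of the file, as in qla2x00_load_ram() above: each wrapper now accepts vha, hoists struct qla_hw_data *ha = vha->hw; into a local when adapter state is consulted more than once (one-off uses stay inline as vha->hw), hands vha down to qla2x00_mailbox_command(), and keys its trace messages on vha->host_no. A skeletal sketch of the converted shape, reusing <stdio.h> and the stub types from the sketch above (names illustrative):

    /* Typical post-conversion wrapper shape. */
    static int example_wrapper(struct scsi_qla_host *vha)
    {
        struct qla_hw_data *ha = vha->hw;   /* hoisted: used repeatedly */
        int rval = 0;

        if (ha->mbox_busy)                  /* hardware state via ha */
            rval = -1;
        printf("(%ld): done.\n", vha->host_no); /* logging keys on the port */
        return rval;
    }
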
@@ -340,13 +346,14 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
340 * Kernel context. 346 * Kernel context.
341 */ 347 */
342 int 348 int
343 qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr) 349 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
344 { 350 {
345 int rval; 351 int rval;
352 struct qla_hw_data *ha = vha->hw;
346 mbx_cmd_t mc; 353 mbx_cmd_t mc;
347 mbx_cmd_t *mcp = &mc; 354 mbx_cmd_t *mcp = &mc;
348 355
349 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 356 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
350 357
351 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 358 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
352 mcp->out_mb = MBX_0; 359 mcp->out_mb = MBX_0;
@@ -369,18 +376,18 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
369 376
370 mcp->tov = MBX_TOV_SECONDS; 377 mcp->tov = MBX_TOV_SECONDS;
371 mcp->flags = 0; 378 mcp->flags = 0;
372 rval = qla2x00_mailbox_command(ha, mcp); 379 rval = qla2x00_mailbox_command(vha, mcp);
373 380
374 if (rval != QLA_SUCCESS) { 381 if (rval != QLA_SUCCESS) {
375 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 382 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
376 ha->host_no, rval, mcp->mb[0])); 383 vha->host_no, rval, mcp->mb[0]));
377 } else { 384 } else {
378 if (IS_FWI2_CAPABLE(ha)) { 385 if (IS_FWI2_CAPABLE(ha)) {
379 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 386 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
380 __func__, ha->host_no, mcp->mb[1])); 387 __func__, vha->host_no, mcp->mb[1]));
381 } else { 388 } else {
382 DEBUG11(printk("%s(%ld): done.\n", __func__, 389 DEBUG11(printk("%s(%ld): done.\n", __func__,
383 ha->host_no)); 390 vha->host_no));
384 } 391 }
385 } 392 }
386 393
@@ -404,28 +411,28 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
404 * Kernel context. 411 * Kernel context.
405 */ 412 */
406 void 413 void
407 qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor, 414 qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
408 uint16_t *subminor, uint16_t *attributes, uint32_t *memory) 415 uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
409 { 416 {
410 int rval; 417 int rval;
411 mbx_cmd_t mc; 418 mbx_cmd_t mc;
412 mbx_cmd_t *mcp = &mc; 419 mbx_cmd_t *mcp = &mc;
413 420
414 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 421 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
415 422
416 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
417 mcp->out_mb = MBX_0; 424 mcp->out_mb = MBX_0;
418 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
419 mcp->flags = 0; 426 mcp->flags = 0;
420 mcp->tov = MBX_TOV_SECONDS; 427 mcp->tov = MBX_TOV_SECONDS;
421 rval = qla2x00_mailbox_command(ha, mcp); 428 rval = qla2x00_mailbox_command(vha, mcp);
422 429
423 /* Return mailbox data. */ 430 /* Return mailbox data. */
424 *major = mcp->mb[1]; 431 *major = mcp->mb[1];
425 *minor = mcp->mb[2]; 432 *minor = mcp->mb[2];
426 *subminor = mcp->mb[3]; 433 *subminor = mcp->mb[3];
427 *attributes = mcp->mb[6]; 434 *attributes = mcp->mb[6];
428 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 435 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
429 *memory = 0x1FFFF; /* Defaults to 128KB. */ 436 *memory = 0x1FFFF; /* Defaults to 128KB. */
430 else 437 else
431 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 438 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
@@ -433,10 +440,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
433 if (rval != QLA_SUCCESS) { 440 if (rval != QLA_SUCCESS) {
434 /*EMPTY*/ 441 /*EMPTY*/
435 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 442 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
436 ha->host_no, rval)); 443 vha->host_no, rval));
437 } else { 444 } else {
438 /*EMPTY*/ 445 /*EMPTY*/
439 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 446 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
440 } 447 }
441 } 448 }
442 449
@@ -455,32 +462,32 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
455 * Kernel context. 462 * Kernel context.
456 */ 463 */
457 int 464 int
458 qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 465 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
459 { 466 {
460 int rval; 467 int rval;
461 mbx_cmd_t mc; 468 mbx_cmd_t mc;
462 mbx_cmd_t *mcp = &mc; 469 mbx_cmd_t *mcp = &mc;
463 470
464 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
465 472
466 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 473 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
467 mcp->out_mb = MBX_0; 474 mcp->out_mb = MBX_0;
468 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 475 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
469 mcp->tov = MBX_TOV_SECONDS; 476 mcp->tov = MBX_TOV_SECONDS;
470 mcp->flags = 0; 477 mcp->flags = 0;
471 rval = qla2x00_mailbox_command(ha, mcp); 478 rval = qla2x00_mailbox_command(vha, mcp);
472 479
473 if (rval != QLA_SUCCESS) { 480 if (rval != QLA_SUCCESS) {
474 /*EMPTY*/ 481 /*EMPTY*/
475 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 482 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
476 ha->host_no, rval)); 483 vha->host_no, rval));
477 } else { 484 } else {
478 fwopts[0] = mcp->mb[0]; 485 fwopts[0] = mcp->mb[0];
479 fwopts[1] = mcp->mb[1]; 486 fwopts[1] = mcp->mb[1];
480 fwopts[2] = mcp->mb[2]; 487 fwopts[2] = mcp->mb[2];
481 fwopts[3] = mcp->mb[3]; 488 fwopts[3] = mcp->mb[3];
482 489
483 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 490 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
484 } 491 }
485 492
486 return rval; 493 return rval;
@@ -502,13 +509,13 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
502 * Kernel context. 509 * Kernel context.
503 */ 510 */
504 int 511 int
505 qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 512 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
506 { 513 {
507 int rval; 514 int rval;
508 mbx_cmd_t mc; 515 mbx_cmd_t mc;
509 mbx_cmd_t *mcp = &mc; 516 mbx_cmd_t *mcp = &mc;
510 517
511 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
512 519
513 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 520 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
514 mcp->mb[1] = fwopts[1]; 521 mcp->mb[1] = fwopts[1];
@@ -516,7 +523,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
516 mcp->mb[3] = fwopts[3]; 523 mcp->mb[3] = fwopts[3];
517 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 524 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
518 mcp->in_mb = MBX_0; 525 mcp->in_mb = MBX_0;
519 if (IS_FWI2_CAPABLE(ha)) { 526 if (IS_FWI2_CAPABLE(vha->hw)) {
520 mcp->in_mb |= MBX_1; 527 mcp->in_mb |= MBX_1;
521 } else { 528 } else {
522 mcp->mb[10] = fwopts[10]; 529 mcp->mb[10] = fwopts[10];
@@ -526,17 +533,17 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
526 } 533 }
527 mcp->tov = MBX_TOV_SECONDS; 534 mcp->tov = MBX_TOV_SECONDS;
528 mcp->flags = 0; 535 mcp->flags = 0;
529 rval = qla2x00_mailbox_command(ha, mcp); 536 rval = qla2x00_mailbox_command(vha, mcp);
530 537
531 fwopts[0] = mcp->mb[0]; 538 fwopts[0] = mcp->mb[0];
532 539
533 if (rval != QLA_SUCCESS) { 540 if (rval != QLA_SUCCESS) {
534 /*EMPTY*/ 541 /*EMPTY*/
535 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 542 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
536 ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 543 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
537 } else { 544 } else {
538 /*EMPTY*/ 545 /*EMPTY*/
539 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 546 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
540 } 547 }
541 548
542 return rval; 549 return rval;
@@ -558,13 +565,14 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
558 * Kernel context. 565 * Kernel context.
559 */ 566 */
560 int 567 int
561 qla2x00_mbx_reg_test(scsi_qla_host_t *ha) 568 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
562 { 569 {
563 int rval; 570 int rval;
571 struct qla_hw_data *ha = vha->hw;
564 mbx_cmd_t mc; 572 mbx_cmd_t mc;
565 mbx_cmd_t *mcp = &mc; 573 mbx_cmd_t *mcp = &mc;
566 574
567 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no)); 575 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
568 576
569 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 577 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
570 mcp->mb[1] = 0xAAAA; 578 mcp->mb[1] = 0xAAAA;
@@ -578,7 +586,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
578 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 586 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
579 mcp->tov = MBX_TOV_SECONDS; 587 mcp->tov = MBX_TOV_SECONDS;
580 mcp->flags = 0; 588 mcp->flags = 0;
581 rval = qla2x00_mailbox_command(ha, mcp); 589 rval = qla2x00_mailbox_command(vha, mcp);
582 590
583 if (rval == QLA_SUCCESS) { 591 if (rval == QLA_SUCCESS) {
584 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 592 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
@@ -591,7 +599,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
591 struct device_reg_24xx __iomem *reg = 599 struct device_reg_24xx __iomem *reg =
592 &ha->iobase->isp24; 600 &ha->iobase->isp24;
593 601
594 qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0, 602 qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
595 LSW(RD_REG_DWORD(&reg->hccr)), 603 LSW(RD_REG_DWORD(&reg->hccr)),
596 LSW(RD_REG_DWORD(&reg->istatus))); 604 LSW(RD_REG_DWORD(&reg->istatus)));
597 } 605 }
@@ -600,11 +608,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
600 if (rval != QLA_SUCCESS) { 608 if (rval != QLA_SUCCESS) {
601 /*EMPTY*/ 609 /*EMPTY*/
602 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 610 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
603 ha->host_no, rval)); 611 vha->host_no, rval));
604 } else { 612 } else {
605 /*EMPTY*/ 613 /*EMPTY*/
606 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
607 ha->host_no)); 615 vha->host_no));
608 } 616 }
609 617
610 return rval; 618 return rval;
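
The register-test hunks above are behavior-preserving: the command still loads alternating bit patterns into the outgoing mailboxes and expects the firmware to echo them back; only the logging and the hardware-event hook move to the vha/ha split. A self-contained sketch of the echo check; the driver verifies further registers beyond the two visible in this hunk, so this sketch is deliberately partial:

    #include <stdint.h>

    /* Mailbox echo test: patterns written out must come back unchanged. */
    static int mbx_echo_ok(const uint16_t *mb)
    {
        return mb[1] == 0xAAAA && mb[2] == 0x5555;
    }
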
@@ -626,18 +634,18 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
626 * Kernel context. 634 * Kernel context.
627 */ 635 */
628 int 636 int
629 qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr) 637 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
630 { 638 {
631 int rval; 639 int rval;
632 mbx_cmd_t mc; 640 mbx_cmd_t mc;
633 mbx_cmd_t *mcp = &mc; 641 mbx_cmd_t *mcp = &mc;
634 642
635 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 643 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
636 644
637 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 645 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
638 mcp->out_mb = MBX_0; 646 mcp->out_mb = MBX_0;
639 mcp->in_mb = MBX_0; 647 mcp->in_mb = MBX_0;
640 if (IS_FWI2_CAPABLE(ha)) { 648 if (IS_FWI2_CAPABLE(vha->hw)) {
641 mcp->mb[1] = MSW(risc_addr); 649 mcp->mb[1] = MSW(risc_addr);
642 mcp->mb[2] = LSW(risc_addr); 650 mcp->mb[2] = LSW(risc_addr);
643 mcp->out_mb |= MBX_2|MBX_1; 651 mcp->out_mb |= MBX_2|MBX_1;
@@ -650,14 +658,14 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
650 658
651 mcp->tov = MBX_TOV_SECONDS; 659 mcp->tov = MBX_TOV_SECONDS;
652 mcp->flags = 0; 660 mcp->flags = 0;
653 rval = qla2x00_mailbox_command(ha, mcp); 661 rval = qla2x00_mailbox_command(vha, mcp);
654 662
655 if (rval != QLA_SUCCESS) { 663 if (rval != QLA_SUCCESS) {
656 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 664 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
657 ha->host_no, rval, IS_FWI2_CAPABLE(ha) ? 665 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
658 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 666 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
659 } else { 667 } else {
660 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 668 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
661 } 669 }
662 670
663 return rval; 671 return rval;
@@ -682,7 +690,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
682 * Kernel context. 690 * Kernel context.
683 */ 691 */
684 static int 692 static int
685 qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, 693 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
686 dma_addr_t phys_addr, size_t size, uint32_t tov) 694 dma_addr_t phys_addr, size_t size, uint32_t tov)
687 { 695 {
688 int rval; 696 int rval;
@@ -699,30 +707,28 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
699 mcp->in_mb = MBX_2|MBX_0; 707 mcp->in_mb = MBX_2|MBX_0;
700 mcp->tov = tov; 708 mcp->tov = tov;
701 mcp->flags = 0; 709 mcp->flags = 0;
702 rval = qla2x00_mailbox_command(ha, mcp); 710 rval = qla2x00_mailbox_command(vha, mcp);
703 711
704 if (rval != QLA_SUCCESS) { 712 if (rval != QLA_SUCCESS) {
705 /*EMPTY*/ 713 /*EMPTY*/
706 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 714 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
707 ha->host_no, rval)); 715 vha->host_no, rval));
708 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
709 ha->host_no, rval));
710 } else { 716 } else {
711 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 717 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
712 718
713 /* Mask reserved bits. */ 719 /* Mask reserved bits. */
714 sts_entry->entry_status &= 720 sts_entry->entry_status &=
715 IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK; 721 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
716 } 722 }
717 723
718 return rval; 724 return rval;
719 } 725 }
720 726
721 int 727 int
722 qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr, 728 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
723 size_t size) 729 size_t size)
724 { 730 {
725 return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size, 731 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
726 MBX_TOV_SECONDS); 732 MBX_TOV_SECONDS);
727 } 733 }
728 734
@@ -741,22 +747,23 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
741 * Kernel context. 747 * Kernel context.
742 */ 748 */
743 int 749 int
744 qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp) 750 qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
745 { 751 {
746 unsigned long flags = 0; 752 unsigned long flags = 0;
747 fc_port_t *fcport; 753 fc_port_t *fcport;
748 int rval; 754 int rval;
749 uint32_t handle; 755 uint32_t handle = 0;
750 mbx_cmd_t mc; 756 mbx_cmd_t mc;
751 mbx_cmd_t *mcp = &mc; 757 mbx_cmd_t *mcp = &mc;
758 struct qla_hw_data *ha = vha->hw;
752 759
753 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no)); 760 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
754 761
755 fcport = sp->fcport; 762 fcport = sp->fcport;
756 763
757 spin_lock_irqsave(&ha->hardware_lock, flags); 764 spin_lock_irqsave(&ha->hardware_lock, flags);
758 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 765 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
759 if (ha->outstanding_cmds[handle] == sp) 766 if (req->outstanding_cmds[handle] == sp)
760 break; 767 break;
761 } 768 }
762 spin_unlock_irqrestore(&ha->hardware_lock, flags); 769 spin_unlock_irqrestore(&ha->hardware_lock, flags);
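
qla2x00_abort_command() now takes the request queue explicitly: with the multiqueue rework, each struct req_que keeps its own outstanding_cmds[] table, so the handle scan walks the caller's queue (starting at 1, handle 0 being reserved) under the hardware lock. An illustrative, self-contained version of that lookup, with the table size and types as stand-ins:

    #include <stdio.h>

    #define MAX_OUTSTANDING_COMMANDS 1024   /* stand-in for the driver's value */

    struct srb { int dummy; };              /* opaque command token */

    struct req_que {
        struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
    };

    /* Scan one request queue for the handle a command was issued under. */
    static unsigned int find_handle(struct req_que *req, struct srb *sp)
    {
        unsigned int handle;

        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++)
            if (req->outstanding_cmds[handle] == sp)
                return handle;
        return 0;   /* not found: the command already completed */
    }

    int main(void)
    {
        static struct req_que req;
        struct srb cmd;

        req.outstanding_cmds[7] = &cmd;
        printf("handle = %u\n", find_handle(&req, &cmd));   /* prints 7 */
        return 0;
    }
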
@@ -778,14 +785,14 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
778 mcp->in_mb = MBX_0; 785 mcp->in_mb = MBX_0;
779 mcp->tov = MBX_TOV_SECONDS; 786 mcp->tov = MBX_TOV_SECONDS;
780 mcp->flags = 0; 787 mcp->flags = 0;
781 rval = qla2x00_mailbox_command(ha, mcp); 788 rval = qla2x00_mailbox_command(vha, mcp);
782 789
783 if (rval != QLA_SUCCESS) { 790 if (rval != QLA_SUCCESS) {
784 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 791 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
785 ha->host_no, rval)); 792 vha->host_no, rval));
786 } else { 793 } else {
787 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 794 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
788 ha->host_no)); 795 vha->host_no));
789 } 796 }
790 797
791 return rval; 798 return rval;
@@ -797,40 +804,45 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
797 int rval, rval2; 804 int rval, rval2;
798 mbx_cmd_t mc; 805 mbx_cmd_t mc;
799 mbx_cmd_t *mcp = &mc; 806 mbx_cmd_t *mcp = &mc;
800 scsi_qla_host_t *ha; 807 scsi_qla_host_t *vha;
808 struct req_que *req;
809 struct rsp_que *rsp;
801 810
802 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 811 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
803 812
804 l = l; 813 l = l;
805 ha = fcport->ha; 814 vha = fcport->vha;
815 req = vha->hw->req_q_map[0];
816 rsp = vha->hw->rsp_q_map[0];
806 mcp->mb[0] = MBC_ABORT_TARGET; 817 mcp->mb[0] = MBC_ABORT_TARGET;
807 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 818 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
808 if (HAS_EXTENDED_IDS(ha)) { 819 if (HAS_EXTENDED_IDS(vha->hw)) {
809 mcp->mb[1] = fcport->loop_id; 820 mcp->mb[1] = fcport->loop_id;
810 mcp->mb[10] = 0; 821 mcp->mb[10] = 0;
811 mcp->out_mb |= MBX_10; 822 mcp->out_mb |= MBX_10;
812 } else { 823 } else {
813 mcp->mb[1] = fcport->loop_id << 8; 824 mcp->mb[1] = fcport->loop_id << 8;
814 } 825 }
815 mcp->mb[2] = ha->loop_reset_delay; 826 mcp->mb[2] = vha->hw->loop_reset_delay;
816 mcp->mb[9] = ha->vp_idx; 827 mcp->mb[9] = vha->vp_idx;
817 828
818 mcp->in_mb = MBX_0; 829 mcp->in_mb = MBX_0;
819 mcp->tov = MBX_TOV_SECONDS; 830 mcp->tov = MBX_TOV_SECONDS;
820 mcp->flags = 0; 831 mcp->flags = 0;
821 rval = qla2x00_mailbox_command(ha, mcp); 832 rval = qla2x00_mailbox_command(vha, mcp);
822 if (rval != QLA_SUCCESS) { 833 if (rval != QLA_SUCCESS) {
823 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 834 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
824 ha->host_no, rval)); 835 vha->host_no, rval));
825 } 836 }
826 837
827 /* Issue marker IOCB. */ 838 /* Issue marker IOCB. */
828 rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); 839 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
840 MK_SYNC_ID);
829 if (rval2 != QLA_SUCCESS) { 841 if (rval2 != QLA_SUCCESS) {
830 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 842 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
831 "(%x).\n", __func__, ha->host_no, rval2)); 843 "(%x).\n", __func__, vha->host_no, rval2));
832 } else { 844 } else {
833 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 845 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
834 } 846 }
835 847
836 return rval; 848 return rval;
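
qla2x00_abort_target() above (and qla2x00_lun_reset() after it) now resolves the port as fcport->vha rather than fcport->ha, and passes an explicit request/response queue pair to qla2x00_marker(); both task-management paths pin the default pair, index 0 of the hw-wide queue maps. A minimal sketch of that selection, with the types reduced to what the hunk shows:

    /* Reduced stand-ins for the driver's queue bookkeeping. */
    struct req_que { int id; };
    struct rsp_que { int id; };

    struct qla_hw_data {
        struct req_que **req_q_map;   /* indexed by queue number; 0 = default */
        struct rsp_que **rsp_q_map;
    };

    struct scsi_qla_host { struct qla_hw_data *hw; };

    /* Task-management paths always use the base queues, as above. */
    static void pick_default_queues(struct scsi_qla_host *vha,
                                    struct req_que **req, struct rsp_que **rsp)
    {
        *req = vha->hw->req_q_map[0];
        *rsp = vha->hw->rsp_q_map[0];
    }
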
@@ -842,37 +854,42 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
842 int rval, rval2; 854 int rval, rval2;
843 mbx_cmd_t mc; 855 mbx_cmd_t mc;
844 mbx_cmd_t *mcp = &mc; 856 mbx_cmd_t *mcp = &mc;
845 scsi_qla_host_t *ha; 857 scsi_qla_host_t *vha;
858 struct req_que *req;
859 struct rsp_que *rsp;
846 860
847 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 861 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
848 862
849 ha = fcport->ha; 863 vha = fcport->vha;
864 req = vha->hw->req_q_map[0];
865 rsp = vha->hw->rsp_q_map[0];
850 mcp->mb[0] = MBC_LUN_RESET; 866 mcp->mb[0] = MBC_LUN_RESET;
851 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 867 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
852 if (HAS_EXTENDED_IDS(ha)) 868 if (HAS_EXTENDED_IDS(vha->hw))
853 mcp->mb[1] = fcport->loop_id; 869 mcp->mb[1] = fcport->loop_id;
854 else 870 else
855 mcp->mb[1] = fcport->loop_id << 8; 871 mcp->mb[1] = fcport->loop_id << 8;
856 mcp->mb[2] = l; 872 mcp->mb[2] = l;
857 mcp->mb[3] = 0; 873 mcp->mb[3] = 0;
858 mcp->mb[9] = ha->vp_idx; 874 mcp->mb[9] = vha->vp_idx;
859 875
860 mcp->in_mb = MBX_0; 876 mcp->in_mb = MBX_0;
861 mcp->tov = MBX_TOV_SECONDS; 877 mcp->tov = MBX_TOV_SECONDS;
862 mcp->flags = 0; 878 mcp->flags = 0;
863 rval = qla2x00_mailbox_command(ha, mcp); 879 rval = qla2x00_mailbox_command(vha, mcp);
864 if (rval != QLA_SUCCESS) { 880 if (rval != QLA_SUCCESS) {
865 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 881 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
866 ha->host_no, rval)); 882 vha->host_no, rval));
867 } 883 }
868 884
869 /* Issue marker IOCB. */ 885 /* Issue marker IOCB. */
870 rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN); 886 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
887 MK_SYNC_ID_LUN);
871 if (rval2 != QLA_SUCCESS) { 888 if (rval2 != QLA_SUCCESS) {
872 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 889 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
873 "(%x).\n", __func__, ha->host_no, rval2)); 890 "(%x).\n", __func__, vha->host_no, rval2));
874 } else { 891 } else {
875 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 892 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
876 } 893 }
877 894
878 return rval; 895 return rval;
@@ -899,7 +916,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
899 * Kernel context. 916 * Kernel context.
900 */ 917 */
901 int 918 int
902 qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa, 919 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
903 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 920 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
904 { 921 {
905 int rval; 922 int rval;
@@ -907,15 +924,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
907 mbx_cmd_t *mcp = &mc; 924 mbx_cmd_t *mcp = &mc;
908 925
909 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 926 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
910 ha->host_no)); 927 vha->host_no));
911 928
912 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 929 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
913 mcp->mb[9] = ha->vp_idx; 930 mcp->mb[9] = vha->vp_idx;
914 mcp->out_mb = MBX_9|MBX_0; 931 mcp->out_mb = MBX_9|MBX_0;
915 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 932 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
916 mcp->tov = MBX_TOV_SECONDS; 933 mcp->tov = MBX_TOV_SECONDS;
917 mcp->flags = 0; 934 mcp->flags = 0;
918 rval = qla2x00_mailbox_command(ha, mcp); 935 rval = qla2x00_mailbox_command(vha, mcp);
919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 936 if (mcp->mb[0] == MBS_COMMAND_ERROR)
920 rval = QLA_COMMAND_ERROR; 937 rval = QLA_COMMAND_ERROR;
921 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 938 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
@@ -932,11 +949,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
932 if (rval != QLA_SUCCESS) { 949 if (rval != QLA_SUCCESS) {
933 /*EMPTY*/ 950 /*EMPTY*/
934 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 951 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
935 ha->host_no, rval)); 952 vha->host_no, rval));
936 } else { 953 } else {
937 /*EMPTY*/ 954 /*EMPTY*/
938 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 955 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
939 ha->host_no)); 956 vha->host_no));
940 } 957 }
941 958
942 return rval; 959 return rval;
@@ -958,7 +975,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
958 * Kernel context. 975 * Kernel context.
959 */ 976 */
960 int 977 int
961 qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov, 978 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
962 uint16_t *r_a_tov) 979 uint16_t *r_a_tov)
963 { 980 {
964 int rval; 981 int rval;
@@ -967,19 +984,19 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
967 mbx_cmd_t *mcp = &mc; 984 mbx_cmd_t *mcp = &mc;
968 985
969 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 986 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
970 ha->host_no)); 987 vha->host_no));
971 988
972 mcp->mb[0] = MBC_GET_RETRY_COUNT; 989 mcp->mb[0] = MBC_GET_RETRY_COUNT;
973 mcp->out_mb = MBX_0; 990 mcp->out_mb = MBX_0;
974 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 991 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
975 mcp->tov = MBX_TOV_SECONDS; 992 mcp->tov = MBX_TOV_SECONDS;
976 mcp->flags = 0; 993 mcp->flags = 0;
977 rval = qla2x00_mailbox_command(ha, mcp); 994 rval = qla2x00_mailbox_command(vha, mcp);
978 995
979 if (rval != QLA_SUCCESS) { 996 if (rval != QLA_SUCCESS) {
980 /*EMPTY*/ 997 /*EMPTY*/
981 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 998 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
982 ha->host_no, mcp->mb[0])); 999 vha->host_no, mcp->mb[0]));
983 } else { 1000 } else {
984 /* Convert returned data and check our values. */ 1001 /* Convert returned data and check our values. */
985 *r_a_tov = mcp->mb[3] / 2; 1002 *r_a_tov = mcp->mb[3] / 2;
@@ -991,7 +1008,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
991 } 1008 }
992 1009
993 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1010 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
994 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov)); 1011 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
995 } 1012 }
996 1013
997 return rval; 1014 return rval;
@@ -1015,14 +1032,15 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
1015 * Kernel context. 1032 * Kernel context.
1016 */ 1033 */
1017 int 1034 int
1018 qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size) 1035 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1019 { 1036 {
1020 int rval; 1037 int rval;
1021 mbx_cmd_t mc; 1038 mbx_cmd_t mc;
1022 mbx_cmd_t *mcp = &mc; 1039 mbx_cmd_t *mcp = &mc;
1040 struct qla_hw_data *ha = vha->hw;
1023 1041
1024 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1042 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1025 ha->host_no)); 1043 vha->host_no));
1026 1044
1027 if (ha->flags.npiv_supported) 1045 if (ha->flags.npiv_supported)
1028 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1046 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
@@ -1040,17 +1058,17 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1040 mcp->buf_size = size; 1058 mcp->buf_size = size;
1041 mcp->flags = MBX_DMA_OUT; 1059 mcp->flags = MBX_DMA_OUT;
1042 mcp->tov = MBX_TOV_SECONDS; 1060 mcp->tov = MBX_TOV_SECONDS;
1043 rval = qla2x00_mailbox_command(ha, mcp); 1061 rval = qla2x00_mailbox_command(vha, mcp);
1044 1062
1045 if (rval != QLA_SUCCESS) { 1063 if (rval != QLA_SUCCESS) {
1046 /*EMPTY*/ 1064 /*EMPTY*/
1047 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1065 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
1048 "mb0=%x.\n", 1066 "mb0=%x.\n",
1049 ha->host_no, rval, mcp->mb[0])); 1067 vha->host_no, rval, mcp->mb[0]));
1050 } else { 1068 } else {
1051 /*EMPTY*/ 1069 /*EMPTY*/
1052 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1070 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
1053 ha->host_no)); 1071 vha->host_no));
1054 } 1072 }
1055 1073
1056 return rval; 1074 return rval;
@@ -1073,7 +1091,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1073 * Kernel context. 1091 * Kernel context.
1074 */ 1092 */
1075 int 1093 int
1076 qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt) 1094 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1077 { 1095 {
1078 int rval; 1096 int rval;
1079 mbx_cmd_t mc; 1097 mbx_cmd_t mc;
@@ -1081,14 +1099,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1081 port_database_t *pd; 1099 port_database_t *pd;
1082 struct port_database_24xx *pd24; 1100 struct port_database_24xx *pd24;
1083 dma_addr_t pd_dma; 1101 dma_addr_t pd_dma;
1102 struct qla_hw_data *ha = vha->hw;
1084 1103
1085 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1104 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1086 1105
1087 pd24 = NULL; 1106 pd24 = NULL;
1088 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1107 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1089 if (pd == NULL) { 1108 if (pd == NULL) {
1090 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1109 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
1091 "structure.\n", __func__, ha->host_no)); 1110 "structure.\n", __func__, vha->host_no));
1092 return QLA_MEMORY_ALLOC_FAILED; 1111 return QLA_MEMORY_ALLOC_FAILED;
1093 } 1112 }
1094 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1113 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1100,7 +1119,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1100 mcp->mb[3] = LSW(pd_dma); 1119 mcp->mb[3] = LSW(pd_dma);
1101 mcp->mb[6] = MSW(MSD(pd_dma)); 1120 mcp->mb[6] = MSW(MSD(pd_dma));
1102 mcp->mb[7] = LSW(MSD(pd_dma)); 1121 mcp->mb[7] = LSW(MSD(pd_dma));
1103 mcp->mb[9] = ha->vp_idx; 1122 mcp->mb[9] = vha->vp_idx;
1104 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1123 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1105 mcp->in_mb = MBX_0; 1124 mcp->in_mb = MBX_0;
1106 if (IS_FWI2_CAPABLE(ha)) { 1125 if (IS_FWI2_CAPABLE(ha)) {
@@ -1120,7 +1139,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1120 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1139 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1121 mcp->flags = MBX_DMA_IN; 1140 mcp->flags = MBX_DMA_IN;
1122 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1141 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1123 rval = qla2x00_mailbox_command(ha, mcp); 1142 rval = qla2x00_mailbox_command(vha, mcp);
1124 if (rval != QLA_SUCCESS) 1143 if (rval != QLA_SUCCESS)
1125 goto gpd_error_out; 1144 goto gpd_error_out;
1126 1145
@@ -1132,7 +1151,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1132 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1151 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1133 DEBUG2(printk("%s(%ld): Unable to verify " 1152 DEBUG2(printk("%s(%ld): Unable to verify "
1134 "login-state (%x/%x) for loop_id %x\n", 1153 "login-state (%x/%x) for loop_id %x\n",
1135 __func__, ha->host_no, 1154 __func__, vha->host_no,
1136 pd24->current_login_state, 1155 pd24->current_login_state,
1137 pd24->last_login_state, fcport->loop_id)); 1156 pd24->last_login_state, fcport->loop_id));
1138 rval = QLA_FUNCTION_FAILED; 1157 rval = QLA_FUNCTION_FAILED;
@@ -1192,9 +1211,9 @@ gpd_error_out:
1192 1211
1193 if (rval != QLA_SUCCESS) { 1212 if (rval != QLA_SUCCESS) {
1194 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1213 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
1195 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1214 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1196 } else { 1215 } else {
1197 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1216 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1198 } 1217 }
1199 1218
1200 return rval; 1219 return rval;
@@ -1217,21 +1236,21 @@ gpd_error_out:
1217 * Kernel context. 1236 * Kernel context.
1218 */ 1237 */
1219 int 1238 int
1220 qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states) 1239 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1221 { 1240 {
1222 int rval; 1241 int rval;
1223 mbx_cmd_t mc; 1242 mbx_cmd_t mc;
1224 mbx_cmd_t *mcp = &mc; 1243 mbx_cmd_t *mcp = &mc;
1225 1244
1226 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1245 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
1227 ha->host_no)); 1246 vha->host_no));
1228 1247
1229 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1248 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1230 mcp->out_mb = MBX_0; 1249 mcp->out_mb = MBX_0;
1231 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1250 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1232 mcp->tov = MBX_TOV_SECONDS; 1251 mcp->tov = MBX_TOV_SECONDS;
1233 mcp->flags = 0; 1252 mcp->flags = 0;
1234 rval = qla2x00_mailbox_command(ha, mcp); 1253 rval = qla2x00_mailbox_command(vha, mcp);
1235 1254
1236 /* Return firmware states. */ 1255 /* Return firmware states. */
1237 states[0] = mcp->mb[1]; 1256 states[0] = mcp->mb[1];
@@ -1241,11 +1260,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1241 if (rval != QLA_SUCCESS) { 1260 if (rval != QLA_SUCCESS) {
1242 /*EMPTY*/ 1261 /*EMPTY*/
1243 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1262 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
1244 "failed=%x.\n", ha->host_no, rval)); 1263 "failed=%x.\n", vha->host_no, rval));
1245 } else { 1264 } else {
1246 /*EMPTY*/ 1265 /*EMPTY*/
1247 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1266 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
1248 ha->host_no)); 1267 vha->host_no));
1249 } 1268 }
1250 1269
1251 return rval; 1270 return rval;
@@ -1270,7 +1289,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1270 * Kernel context. 1289 * Kernel context.
1271 */ 1290 */
1272 int 1291 int
1273 qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name, 1292 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1274 uint8_t opt) 1293 uint8_t opt)
1275 { 1294 {
1276 int rval; 1295 int rval;
@@ -1278,12 +1297,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1278 mbx_cmd_t *mcp = &mc; 1297 mbx_cmd_t *mcp = &mc;
1279 1298
1280 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1299 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
1281 ha->host_no)); 1300 vha->host_no));
1282 1301
1283 mcp->mb[0] = MBC_GET_PORT_NAME; 1302 mcp->mb[0] = MBC_GET_PORT_NAME;
1284 mcp->mb[9] = ha->vp_idx; 1303 mcp->mb[9] = vha->vp_idx;
1285 mcp->out_mb = MBX_9|MBX_1|MBX_0; 1304 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1286 if (HAS_EXTENDED_IDS(ha)) { 1305 if (HAS_EXTENDED_IDS(vha->hw)) {
1287 mcp->mb[1] = loop_id; 1306 mcp->mb[1] = loop_id;
1288 mcp->mb[10] = opt; 1307 mcp->mb[10] = opt;
1289 mcp->out_mb |= MBX_10; 1308 mcp->out_mb |= MBX_10;
@@ -1294,12 +1313,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1294 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1313 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1295 mcp->tov = MBX_TOV_SECONDS; 1314 mcp->tov = MBX_TOV_SECONDS;
1296 mcp->flags = 0; 1315 mcp->flags = 0;
1297 rval = qla2x00_mailbox_command(ha, mcp); 1316 rval = qla2x00_mailbox_command(vha, mcp);
1298 1317
1299 if (rval != QLA_SUCCESS) { 1318 if (rval != QLA_SUCCESS) {
1300 /*EMPTY*/ 1319 /*EMPTY*/
1301 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1320 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
1302 ha->host_no, rval)); 1321 vha->host_no, rval));
1303 } else { 1322 } else {
1304 if (name != NULL) { 1323 if (name != NULL) {
1305 /* This function returns name in big endian. */ 1324 /* This function returns name in big endian. */
@@ -1314,7 +1333,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1314 } 1333 }
1315 1334
1316 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1335 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
1317 ha->host_no)); 1336 vha->host_no));
1318 } 1337 }
1319 1338
1320 return rval; 1339 return rval;
@@ -1336,45 +1355,45 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1336 * Kernel context. 1355 * Kernel context.
1337 */ 1356 */
1338 int 1357 int
1339 qla2x00_lip_reset(scsi_qla_host_t *ha) 1358 qla2x00_lip_reset(scsi_qla_host_t *vha)
1340 { 1359 {
1341 int rval; 1360 int rval;
1342 mbx_cmd_t mc; 1361 mbx_cmd_t mc;
1343 mbx_cmd_t *mcp = &mc; 1362 mbx_cmd_t *mcp = &mc;
1344 1363
1345 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1346 1365
1347 if (IS_FWI2_CAPABLE(ha)) { 1366 if (IS_FWI2_CAPABLE(vha->hw)) {
1348 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1367 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1349 mcp->mb[1] = BIT_6; 1368 mcp->mb[1] = BIT_6;
1350 mcp->mb[2] = 0; 1369 mcp->mb[2] = 0;
1351 mcp->mb[3] = ha->loop_reset_delay; 1370 mcp->mb[3] = vha->hw->loop_reset_delay;
1352 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1371 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1353 } else { 1372 } else {
1354 mcp->mb[0] = MBC_LIP_RESET; 1373 mcp->mb[0] = MBC_LIP_RESET;
1355 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1374 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1356 if (HAS_EXTENDED_IDS(ha)) { 1375 if (HAS_EXTENDED_IDS(vha->hw)) {
1357 mcp->mb[1] = 0x00ff; 1376 mcp->mb[1] = 0x00ff;
1358 mcp->mb[10] = 0; 1377 mcp->mb[10] = 0;
1359 mcp->out_mb |= MBX_10; 1378 mcp->out_mb |= MBX_10;
1360 } else { 1379 } else {
1361 mcp->mb[1] = 0xff00; 1380 mcp->mb[1] = 0xff00;
1362 } 1381 }
1363 mcp->mb[2] = ha->loop_reset_delay; 1382 mcp->mb[2] = vha->hw->loop_reset_delay;
1364 mcp->mb[3] = 0; 1383 mcp->mb[3] = 0;
1365 } 1384 }
1366 mcp->in_mb = MBX_0; 1385 mcp->in_mb = MBX_0;
1367 mcp->tov = MBX_TOV_SECONDS; 1386 mcp->tov = MBX_TOV_SECONDS;
1368 mcp->flags = 0; 1387 mcp->flags = 0;
1369 rval = qla2x00_mailbox_command(ha, mcp); 1388 rval = qla2x00_mailbox_command(vha, mcp);
1370 1389
1371 if (rval != QLA_SUCCESS) { 1390 if (rval != QLA_SUCCESS) {
1372 /*EMPTY*/ 1391 /*EMPTY*/
1373 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1392 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
1374 __func__, ha->host_no, rval)); 1393 __func__, vha->host_no, rval));
1375 } else { 1394 } else {
1376 /*EMPTY*/ 1395 /*EMPTY*/
1377 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1378 } 1397 }
1379 1398
1380 return rval; 1399 return rval;
@@ -1399,7 +1418,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1399 * Kernel context. 1418 * Kernel context.
1400 */ 1419 */
1401 int 1420 int
1402 qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address, 1421 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1403 uint16_t cmd_size, size_t buf_size) 1422 uint16_t cmd_size, size_t buf_size)
1404 { 1423 {
1405 int rval; 1424 int rval;
@@ -1407,10 +1426,11 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1407 mbx_cmd_t *mcp = &mc; 1426 mbx_cmd_t *mcp = &mc;
1408 1427
1409 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1428 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
1410 ha->host_no)); 1429 vha->host_no));
1411 1430
1412 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1431 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
1413 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov)); 1432 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
1433 mcp->tov));
1414 1434
1415 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1435 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1416 mcp->mb[1] = cmd_size; 1436 mcp->mb[1] = cmd_size;
@@ -1422,25 +1442,25 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1422 mcp->in_mb = MBX_0|MBX_1; 1442 mcp->in_mb = MBX_0|MBX_1;
1423 mcp->buf_size = buf_size; 1443 mcp->buf_size = buf_size;
1424 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 1444 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1425 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1445 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1426 rval = qla2x00_mailbox_command(ha, mcp); 1446 rval = qla2x00_mailbox_command(vha, mcp);
1427 1447
1428 if (rval != QLA_SUCCESS) { 1448 if (rval != QLA_SUCCESS) {
1429 /*EMPTY*/ 1449 /*EMPTY*/
1430 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1450 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1431 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1451 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1432 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1452 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1433 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1453 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1434 } else { 1454 } else {
1435 /*EMPTY*/ 1455 /*EMPTY*/
1436 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no)); 1456 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
1437 } 1457 }
1438 1458
1439 return rval; 1459 return rval;
1440 } 1460 }
1441 1461
1442 int 1462 int
1443 qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1463 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1444 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1464 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1445 { 1465 {
1446 int rval; 1466 int rval;
@@ -1448,13 +1468,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1448 struct logio_entry_24xx *lg; 1468 struct logio_entry_24xx *lg;
1449 dma_addr_t lg_dma; 1469 dma_addr_t lg_dma;
1450 uint32_t iop[2]; 1470 uint32_t iop[2];
1471 struct qla_hw_data *ha = vha->hw;
1451 1472
1452 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1473 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1453 1474
1454 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1475 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1455 if (lg == NULL) { 1476 if (lg == NULL) {
1456 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1477 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
1457 __func__, ha->host_no)); 1478 __func__, vha->host_no));
1458 return QLA_MEMORY_ALLOC_FAILED; 1479 return QLA_MEMORY_ALLOC_FAILED;
1459 } 1480 }
1460 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1481 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1470,14 +1491,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1470 lg->port_id[0] = al_pa; 1491 lg->port_id[0] = al_pa;
1471 lg->port_id[1] = area; 1492 lg->port_id[1] = area;
1472 lg->port_id[2] = domain; 1493 lg->port_id[2] = domain;
1473 lg->vp_index = ha->vp_idx; 1494 lg->vp_index = vha->vp_idx;
1474 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1495 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1475 if (rval != QLA_SUCCESS) { 1496 if (rval != QLA_SUCCESS) {
1476 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1497 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
1477 "(%x).\n", __func__, ha->host_no, rval)); 1498 "(%x).\n", __func__, vha->host_no, rval));
1478 } else if (lg->entry_status != 0) { 1499 } else if (lg->entry_status != 0) {
1479 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1500 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1480 "-- error status (%x).\n", __func__, ha->host_no, 1501 "-- error status (%x).\n", __func__, vha->host_no,
1481 lg->entry_status)); 1502 lg->entry_status));
1482 rval = QLA_FUNCTION_FAILED; 1503 rval = QLA_FUNCTION_FAILED;
1483 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1504 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
@@ -1486,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1486 1507
1487 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1508 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1488 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1509 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1489 ha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1510 vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
1490 iop[1])); 1511 iop[1]));
1491 1512
1492 switch (iop[0]) { 1513 switch (iop[0]) {
@@ -1515,7 +1536,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1515 break; 1536 break;
1516 } 1537 }
1517 } else { 1538 } else {
1518 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1539 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1519 1540
1520 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1541 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1521 1542
@@ -1562,14 +1583,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1562 * Kernel context. 1583 * Kernel context.
1563 */ 1584 */
1564 int 1585 int
1565 qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1586 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1566 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1587 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1567 { 1588 {
1568 int rval; 1589 int rval;
1569 mbx_cmd_t mc; 1590 mbx_cmd_t mc;
1570 mbx_cmd_t *mcp = &mc; 1591 mbx_cmd_t *mcp = &mc;
1592 struct qla_hw_data *ha = vha->hw;
1571 1593
1572 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no)); 1594 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
1573 1595
1574 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1596 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1575 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1597 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1586,7 +1608,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1586 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 1608 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1587 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1609 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1588 mcp->flags = 0; 1610 mcp->flags = 0;
1589 rval = qla2x00_mailbox_command(ha, mcp); 1611 rval = qla2x00_mailbox_command(vha, mcp);
1590 1612
1591 /* Return mailbox statuses. */ 1613 /* Return mailbox statuses. */
1592 if (mb != NULL) { 1614 if (mb != NULL) {
@@ -1613,12 +1635,12 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1613 1635
1614 /*EMPTY*/ 1636 /*EMPTY*/
1615 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1637 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
1616 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval, 1638 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
1617 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1639 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
1618 } else { 1640 } else {
1619 /*EMPTY*/ 1641 /*EMPTY*/
1620 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1642 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
1621 ha->host_no)); 1643 vha->host_no));
1622 } 1644 }
1623 1645
1624 return rval; 1646 return rval;
@@ -1641,19 +1663,20 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1641 * 1663 *
1642 */ 1664 */
1643 int 1665 int
1644 qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport, 1666 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1645 uint16_t *mb_ret, uint8_t opt) 1667 uint16_t *mb_ret, uint8_t opt)
1646 { 1668 {
1647 int rval; 1669 int rval;
1648 mbx_cmd_t mc; 1670 mbx_cmd_t mc;
1649 mbx_cmd_t *mcp = &mc; 1671 mbx_cmd_t *mcp = &mc;
1672 struct qla_hw_data *ha = vha->hw;
1650 1673
1651 if (IS_FWI2_CAPABLE(ha)) 1674 if (IS_FWI2_CAPABLE(ha))
1652 return qla24xx_login_fabric(ha, fcport->loop_id, 1675 return qla24xx_login_fabric(vha, fcport->loop_id,
1653 fcport->d_id.b.domain, fcport->d_id.b.area, 1676 fcport->d_id.b.domain, fcport->d_id.b.area,
1654 fcport->d_id.b.al_pa, mb_ret, opt); 1677 fcport->d_id.b.al_pa, mb_ret, opt);
1655 1678
1656 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1679 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1657 1680
1658 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1681 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1659 if (HAS_EXTENDED_IDS(ha)) 1682 if (HAS_EXTENDED_IDS(ha))
@@ -1665,7 +1688,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1665 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 1688 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1666 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1689 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1667 mcp->flags = 0; 1690 mcp->flags = 0;
1668 rval = qla2x00_mailbox_command(ha, mcp); 1691 rval = qla2x00_mailbox_command(vha, mcp);
1669 1692
1670 /* Return mailbox statuses. */ 1693 /* Return mailbox statuses. */
1671 if (mb_ret != NULL) { 1694 if (mb_ret != NULL) {
@@ -1686,33 +1709,34 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1686 rval = QLA_SUCCESS; 1709 rval = QLA_SUCCESS;
1687 1710
1688 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1711 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1689 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1712 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1690 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1713 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1691 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1714 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1692 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1715 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1693 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1716 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1694 } else { 1717 } else {
1695 /*EMPTY*/ 1718 /*EMPTY*/
1696 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1719 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
1697 } 1720 }
1698 1721
1699 return (rval); 1722 return (rval);
1700 } 1723 }
1701 1724
1702 int 1725 int
1703 qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1726 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1704 uint8_t area, uint8_t al_pa) 1727 uint8_t area, uint8_t al_pa)
1705 { 1728 {
1706 int rval; 1729 int rval;
1707 struct logio_entry_24xx *lg; 1730 struct logio_entry_24xx *lg;
1708 dma_addr_t lg_dma; 1731 dma_addr_t lg_dma;
1732 struct qla_hw_data *ha = vha->hw;
1709 1733
1710 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1734 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1711 1735
1712 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1736 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1713 if (lg == NULL) { 1737 if (lg == NULL) {
1714 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1738 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
1715 __func__, ha->host_no)); 1739 __func__, vha->host_no));
1716 return QLA_MEMORY_ALLOC_FAILED; 1740 return QLA_MEMORY_ALLOC_FAILED;
1717 } 1741 }
1718 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1742 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1725,25 +1749,26 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1725 lg->port_id[0] = al_pa; 1749 lg->port_id[0] = al_pa;
1726 lg->port_id[1] = area; 1750 lg->port_id[1] = area;
1727 lg->port_id[2] = domain; 1751 lg->port_id[2] = domain;
1728 lg->vp_index = ha->vp_idx; 1752 lg->vp_index = vha->vp_idx;
1729 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1753
1754 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1730 if (rval != QLA_SUCCESS) { 1755 if (rval != QLA_SUCCESS) {
1731 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1756 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
1732 "(%x).\n", __func__, ha->host_no, rval)); 1757 "(%x).\n", __func__, vha->host_no, rval));
1733 } else if (lg->entry_status != 0) { 1758 } else if (lg->entry_status != 0) {
1734 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1759 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1735 "-- error status (%x).\n", __func__, ha->host_no, 1760 "-- error status (%x).\n", __func__, vha->host_no,
1736 lg->entry_status)); 1761 lg->entry_status));
1737 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
1738 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1763 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1739 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1764 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
1740 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1765 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1741 ha->host_no, le16_to_cpu(lg->comp_status), 1766 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
1742 le32_to_cpu(lg->io_parameter[0]), 1767 le32_to_cpu(lg->io_parameter[0]),
1743 le32_to_cpu(lg->io_parameter[1]))); 1768 le32_to_cpu(lg->io_parameter[1])));
1744 } else { 1769 } else {
1745 /*EMPTY*/ 1770 /*EMPTY*/
1746 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1771 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1747 } 1772 }
1748 1773
1749 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1774 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
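
In qla24xx_login_fabric() and qla24xx_fabric_logout() above, the IOCB's vp_index field is now filled from vha->vp_idx, so the firmware ties the login or logout exchange to the issuing virtual port; the logout failure message gains the vp index for the same reason. The stamping itself reduces to one assignment (the struct layout here is illustrative, not the driver's):

    #include <stdint.h>

    struct logio_entry { uint8_t vp_index; /* ...other IOCB fields... */ };
    struct scsi_qla_host { uint8_t vp_idx; /* ...per-port fields... */ };

    /* Tag an IOCB with the virtual port that issues it. */
    static void stamp_vp_index(struct logio_entry *lg,
                               const struct scsi_qla_host *vha)
    {
        lg->vp_index = vha->vp_idx;
    }
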
@@ -1768,7 +1793,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1768 * Kernel context. 1793 * Kernel context.
1769 */ 1794 */
1770 int 1795 int
1771 qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1796 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1772 uint8_t area, uint8_t al_pa) 1797 uint8_t area, uint8_t al_pa)
1773 { 1798 {
1774 int rval; 1799 int rval;
@@ -1776,11 +1801,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1776 mbx_cmd_t *mcp = &mc; 1801 mbx_cmd_t *mcp = &mc;
1777 1802
1778 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1803 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
1779 ha->host_no)); 1804 vha->host_no));
1780 1805
1781 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1806 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1782 mcp->out_mb = MBX_1|MBX_0; 1807 mcp->out_mb = MBX_1|MBX_0;
1783 if (HAS_EXTENDED_IDS(ha)) { 1808 if (HAS_EXTENDED_IDS(vha->hw)) {
1784 mcp->mb[1] = loop_id; 1809 mcp->mb[1] = loop_id;
1785 mcp->mb[10] = 0; 1810 mcp->mb[10] = 0;
1786 mcp->out_mb |= MBX_10; 1811 mcp->out_mb |= MBX_10;
@@ -1791,16 +1816,16 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1791 mcp->in_mb = MBX_1|MBX_0; 1816 mcp->in_mb = MBX_1|MBX_0;
1792 mcp->tov = MBX_TOV_SECONDS; 1817 mcp->tov = MBX_TOV_SECONDS;
1793 mcp->flags = 0; 1818 mcp->flags = 0;
1794 rval = qla2x00_mailbox_command(ha, mcp); 1819 rval = qla2x00_mailbox_command(vha, mcp);
1795 1820
1796 if (rval != QLA_SUCCESS) { 1821 if (rval != QLA_SUCCESS) {
1797 /*EMPTY*/ 1822 /*EMPTY*/
1798 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1823 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
1799 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1])); 1824 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
1800 } else { 1825 } else {
1801 /*EMPTY*/ 1826 /*EMPTY*/
1802 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1827 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
1803 ha->host_no)); 1828 vha->host_no));
1804 } 1829 }
1805 1830
1806 return rval; 1831 return rval;
@@ -1822,33 +1847,33 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1822 * Kernel context. 1847 * Kernel context.
1823 */ 1848 */
1824int 1849int
1825qla2x00_full_login_lip(scsi_qla_host_t *ha) 1850qla2x00_full_login_lip(scsi_qla_host_t *vha)
1826{ 1851{
1827 int rval; 1852 int rval;
1828 mbx_cmd_t mc; 1853 mbx_cmd_t mc;
1829 mbx_cmd_t *mcp = &mc; 1854 mbx_cmd_t *mcp = &mc;
1830 1855
1831 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1856 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1832 ha->host_no)); 1857 vha->host_no));
1833 1858
1834 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1859 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1835 mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0; 1860 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1836 mcp->mb[2] = 0; 1861 mcp->mb[2] = 0;
1837 mcp->mb[3] = 0; 1862 mcp->mb[3] = 0;
1838 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1863 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1839 mcp->in_mb = MBX_0; 1864 mcp->in_mb = MBX_0;
1840 mcp->tov = MBX_TOV_SECONDS; 1865 mcp->tov = MBX_TOV_SECONDS;
1841 mcp->flags = 0; 1866 mcp->flags = 0;
1842 rval = qla2x00_mailbox_command(ha, mcp); 1867 rval = qla2x00_mailbox_command(vha, mcp);
1843 1868
1844 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1845 /*EMPTY*/ 1870 /*EMPTY*/
1846 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1871 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
1847 ha->host_no, rval)); 1872 vha->host_no, rval));
1848 } else { 1873 } else {
1849 /*EMPTY*/ 1874 /*EMPTY*/
1850 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1875 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
1851 ha->host_no)); 1876 vha->host_no));
1852 } 1877 }
1853 1878
1854 return rval; 1879 return rval;
@@ -1867,7 +1892,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
  * Kernel context.
  */
 int
-qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
+qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
     uint16_t *entries)
 {
 	int rval;
@@ -1875,20 +1900,20 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
 	mbx_cmd_t *mcp = &mc;
 
 	DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
-	    ha->host_no));
+	    vha->host_no));
 
 	if (id_list == NULL)
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_GET_ID_LIST;
 	mcp->out_mb = MBX_0;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[2] = MSW(id_list_dma);
 		mcp->mb[3] = LSW(id_list_dma);
 		mcp->mb[6] = MSW(MSD(id_list_dma));
 		mcp->mb[7] = LSW(MSD(id_list_dma));
 		mcp->mb[8] = 0;
-		mcp->mb[9] = ha->vp_idx;
+		mcp->mb[9] = vha->vp_idx;
 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
 	} else {
 		mcp->mb[1] = MSW(id_list_dma);
@@ -1900,16 +1925,16 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
 		*entries = mcp->mb[1];
 		DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
-		    ha->host_no));
+		    vha->host_no));
 	}
 
 	return rval;
@@ -1929,7 +1954,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
  * Kernel context.
  */
 int
-qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
     uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
     uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
 {
@@ -1937,22 +1962,22 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
-		    ha->host_no, mcp->mb[0]));
+		    vha->host_no, mcp->mb[0]));
 	} else {
 		DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
-		    "mb7=%x mb10=%x mb11=%x.\n", __func__, ha->host_no,
+		    "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
 		    mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
 		    mcp->mb[10], mcp->mb[11]));
 
@@ -1964,7 +1989,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
 		*cur_iocb_cnt = mcp->mb[7];
 		if (orig_iocb_cnt)
 			*orig_iocb_cnt = mcp->mb[10];
-		if (ha->flags.npiv_supported && max_npiv_vports)
+		if (vha->hw->flags.npiv_supported && max_npiv_vports)
 			*max_npiv_vports = mcp->mb[11];
 	}
 
@@ -1987,18 +2012,19 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
  * Kernel context.
  */
 int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 	char *pmap;
 	dma_addr_t pmap_dma;
+	struct qla_hw_data *ha = vha->hw;
 
 	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
 	if (pmap == NULL) {
 		DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2013,11 +2039,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
 	mcp->buf_size = FCAL_MAP_SIZE;
 	mcp->flags = MBX_DMA_IN;
 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
-		    "size (%x)\n", __func__, ha->host_no, mcp->mb[0],
+		    "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
 		    mcp->mb[1], (unsigned)pmap[0]));
 		DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
 
@@ -2028,9 +2054,9 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2051,15 +2077,16 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
  * BIT_1 = mailbox error.
  */
 int
-qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
     struct link_statistics *stats, dma_addr_t stats_dma)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 	uint32_t *siter, *diter, dwords;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_LINK_STATUS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2084,12 +2111,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
 	}
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
-			    __func__, ha->host_no, mcp->mb[0]));
+			    __func__, vha->host_no, mcp->mb[0]));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
 			/* Copy over data -- firmware data is LE. */
@@ -2101,14 +2128,14 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
 	} else {
 		/* Failed. */
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	return rval;
 }
 
 int
-qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
+qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
     dma_addr_t stats_dma)
 {
 	int rval;
@@ -2116,7 +2143,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	mbx_cmd_t *mcp = &mc;
 	uint32_t *siter, *diter, dwords;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
 	mcp->mb[2] = MSW(stats_dma);
@@ -2124,18 +2151,18 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	mcp->mb[6] = MSW(MSD(stats_dma));
 	mcp->mb[7] = LSW(MSD(stats_dma));
 	mcp->mb[8] = sizeof(struct link_statistics) / 4;
-	mcp->mb[9] = ha->vp_idx;
+	mcp->mb[9] = vha->vp_idx;
 	mcp->mb[10] = 0;
 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = IOCTL_CMD;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
 			DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
-			    __func__, ha->host_no, mcp->mb[0]));
+			    __func__, vha->host_no, mcp->mb[0]));
 			rval = QLA_FUNCTION_FAILED;
 		} else {
 			/* Copy over data -- firmware data is LE. */
@@ -2147,14 +2174,14 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
 	} else {
 		/* Failed. */
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	}
 
 	return rval;
 }
 
 int
-qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
+qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
 {
 	int rval;
 	fc_port_t *fcport;
@@ -2163,18 +2190,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	struct abort_entry_24xx *abt;
 	dma_addr_t abt_dma;
 	uint32_t handle;
-	scsi_qla_host_t *pha = to_qla_parent(ha);
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	fcport = sp->fcport;
 
-	spin_lock_irqsave(&pha->hardware_lock, flags);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
-		if (pha->outstanding_cmds[handle] == sp)
+		if (req->outstanding_cmds[handle] == sp)
 			break;
 	}
-	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	if (handle == MAX_OUTSTANDING_COMMANDS) {
 		/* Command not found. */
 		return QLA_FUNCTION_FAILED;
@@ -2183,7 +2210,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
 	if (abt == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
-		    __func__, ha->host_no));
+		    __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2196,22 +2223,25 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
 	abt->port_id[1] = fcport->d_id.b.area;
 	abt->port_id[2] = fcport->d_id.b.domain;
 	abt->vp_index = fcport->vp_idx;
-	rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
+
+	abt->req_que_no = cpu_to_le16(req->id);
+
+	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
-		    __func__, ha->host_no, rval));
+		    __func__, vha->host_no, rval));
 	} else if (abt->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    abt->entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, vha->host_no,
 		    le16_to_cpu(abt->nport_handle)));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2233,16 +2263,21 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 	int rval, rval2;
 	struct tsk_mgmt_cmd *tsk;
 	dma_addr_t tsk_dma;
-	scsi_qla_host_t *ha, *pha;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
 
-	ha = fcport->ha;
-	pha = to_qla_parent(ha);
-	tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+	vha = fcport->vha;
+	ha = vha->hw;
+	req = ha->req_q_map[0];
+	rsp = ha->rsp_q_map[0];
+	tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
 	if (tsk == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
-		    "IOCB.\n", __func__, ha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2262,34 +2297,34 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 		    sizeof(tsk->p.tsk.lun));
 	}
 
-	rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
+	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
-		    "(%x).\n", __func__, ha->host_no, name, rval));
+		    "(%x).\n", __func__, vha->host_no, name, rval));
 	} else if (tsk->p.sts.entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, vha->host_no,
 		    tsk->p.sts.entry_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (tsk->p.sts.comp_status !=
 	    __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
 		    "-- completion status (%x).\n", __func__,
-		    ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
+		    vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
 		rval = QLA_FUNCTION_FAILED;
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(ha, fcport->loop_id, l,
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
 	if (rval2 != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
-		    "(%x).\n", __func__, ha->host_no, rval2));
+		    "(%x).\n", __func__, vha->host_no, rval2));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
-	dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
+	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
 
 	return rval;
 }
@@ -2307,29 +2342,30 @@ qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
 }
 
 int
-qla2x00_system_error(scsi_qla_host_t *ha)
+qla2x00_system_error(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = 5;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2342,14 +2378,14 @@ qla2x00_system_error(scsi_qla_host_t *ha)
  * Returns
  */
 int
-qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
+qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
    uint16_t sw_em_2g, uint16_t sw_em_4g)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_SERDES_PARAMS;
 	mcp->mb[1] = BIT_0;
@@ -2360,61 +2396,61 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
 		/*EMPTY*/
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_stop_firmware(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_STOP_FIRMWARE;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_0;
 	mcp->tov = 5;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
+qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
    uint16_t buffers)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_EFT_ENABLE;
@@ -2428,28 +2464,28 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
+qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_EFT_DISABLE;
@@ -2457,29 +2493,29 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
 	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
+qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
    uint16_t buffers, uint16_t *mb, uint32_t *dwords)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA25XX(ha))
+	if (!IS_QLA25XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_FCE_ENABLE;
@@ -2497,12 +2533,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 
 		if (mb)
 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2514,16 +2550,16 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
 }
 
 int
-qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
+qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_TRACE_CONTROL;
 	mcp->mb[1] = TC_FCE_DISABLE;
@@ -2533,12 +2569,12 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
 	    MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
-		    __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 
 		if (wr)
 			*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2556,17 +2592,17 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
 }
 
 int
-qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
+qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
    uint16_t off, uint16_t count)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_READ_SFP;
 	mcp->mb[1] = addr;
@@ -2581,30 +2617,30 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 int
-qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
    uint16_t port_speed, uint16_t *mb)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_IIDMA_CAPABLE(ha))
+	if (!IS_IIDMA_CAPABLE(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_PORT_PARAMS;
 	mcp->mb[1] = loop_id;
@@ -2615,7 +2651,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
 	mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox statuses. */
 	if (mb != NULL) {
@@ -2628,28 +2664,29 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
-		    ha->host_no, rval));
+		    vha->host_no, rval));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
 }
 
 void
-qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
+qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
    struct vp_rpt_id_entry_24xx *rptid_entry)
 {
 	uint8_t vp_idx;
 	uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
-	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp;
 
 	if (rptid_entry->entry_status != 0)
 		return;
 
 	if (rptid_entry->format == 0) {
 		DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
-		    " number of VPs acquired %d\n", __func__, ha->host_no,
+		    " number of VPs acquired %d\n", __func__, vha->host_no,
 		    MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
 		DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2658,7 +2695,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
 		vp_idx = LSB(stat);
 		DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
 		    "- status %d - "
-		    "with port id %02x%02x%02x\n",__func__,ha->host_no,
+		    "with port id %02x%02x%02x\n", __func__, vha->host_no,
 		    vp_idx, MSB(stat),
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]));
@@ -2668,25 +2705,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
 		if (MSB(stat) == 1)
 			return;
 
-		list_for_each_entry(vha, &ha->vp_list, vp_list)
-			if (vp_idx == vha->vp_idx)
+		list_for_each_entry(vp, &ha->vp_list, list)
+			if (vp_idx == vp->vp_idx)
 				break;
-
-		if (!vha)
+		if (!vp)
 			return;
 
-		vha->d_id.b.domain = rptid_entry->port_id[2];
-		vha->d_id.b.area = rptid_entry->port_id[1];
-		vha->d_id.b.al_pa = rptid_entry->port_id[0];
+		vp->d_id.b.domain = rptid_entry->port_id[2];
+		vp->d_id.b.area = rptid_entry->port_id[1];
+		vp->d_id.b.al_pa = rptid_entry->port_id[0];
 
 		/*
 		 * Cannot configure here as we are still sitting on the
 		 * response queue. Handle it in dpc context.
 		 */
-		set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
-		set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+		set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
-		qla2xxx_wake_dpc(ha);
+		qla2xxx_wake_dpc(vha);
 	}
 }
 
@@ -2709,15 +2745,15 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 	int rval;
 	struct vp_config_entry_24xx *vpmod;
 	dma_addr_t vpmod_dma;
-	scsi_qla_host_t *pha;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	/* This can be called by the parent */
-	pha = to_qla_parent(vha);
 
-	vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+	vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
 	if (!vpmod) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
-		    "IOCB.\n", __func__, pha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 
@@ -2732,26 +2768,27 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
 	vpmod->entry_count = 1;
 
-	rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
+	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
-		    "(%x).\n", __func__, pha->host_no, rval));
+		    "(%x).\n", __func__, base_vha->host_no, rval));
 	} else if (vpmod->comp_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, pha->host_no,
+		    "-- error status (%x).\n", __func__, base_vha->host_no,
 		    vpmod->comp_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, pha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vpmod->comp_status)));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* EMPTY */
-		DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__,
+		    base_vha->host_no));
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
 	}
-	dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
+	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
 
 	return rval;
 }
@@ -2778,11 +2815,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	int map, pos;
 	struct vp_ctrl_entry_24xx *vce;
 	dma_addr_t vce_dma;
-	scsi_qla_host_t *ha = vha->parent;
+	struct qla_hw_data *ha = vha->hw;
 	int vp_index = vha->vp_idx;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
-	    ha->host_no, vp_index));
+	    vha->host_no, vp_index));
 
 	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
 		return QLA_PARAMETER_ERROR;
@@ -2791,7 +2829,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	if (!vce) {
 		DEBUG2_3(printk("%s(%ld): "
 		    "failed to allocate VP Control IOCB.\n", __func__,
-		    ha->host_no));
+		    base_vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 	memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -2810,30 +2848,30 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	vce->vp_idx_map[map] |= 1 << pos;
 	mutex_unlock(&ha->vport_lock);
 
-	rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
+	rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
-		    "(%x).\n", __func__, ha->host_no, rval));
+		    "(%x).\n", __func__, base_vha->host_no, rval));
 		printk("%s(%ld): failed to issue VP control IOCB"
-		    "(%x).\n", __func__, ha->host_no, rval);
+		    "(%x).\n", __func__, base_vha->host_no, rval);
 	} else if (vce->entry_status != 0) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, base_vha->host_no,
 		    vce->entry_status));
 		printk("%s(%ld): failed to complete IOCB "
-		    "-- error status (%x).\n", __func__, ha->host_no,
+		    "-- error status (%x).\n", __func__, base_vha->host_no,
 		    vce->entry_status);
 		rval = QLA_FUNCTION_FAILED;
 	} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vce->comp_status)));
 		printk("%s(%ld): failed to complete IOCB "
-		    "-- completion status (%x).\n", __func__, ha->host_no,
+		    "-- completion status (%x).\n", __func__, base_vha->host_no,
 		    le16_to_cpu(vce->comp_status));
 		rval = QLA_FUNCTION_FAILED;
 	} else {
-		DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
 	}
 
 	dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -2863,7 +2901,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
  */
 
 int
-qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
+qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
    uint16_t vp_idx)
 {
 	int rval;
@@ -2884,7 +2922,7 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
 	mcp->in_mb = MBX_0|MBX_1;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -2897,16 +2935,16 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
 }
 
 int
-qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
+qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
 {
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-	if (MSW(addr) || IS_FWI2_CAPABLE(ha)) {
+	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
 		mcp->mb[8] = MSW(addr);
 		mcp->out_mb = MBX_8|MBX_0;
@@ -2920,7 +2958,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
 	mcp->mb[6] = MSW(MSD(req_dma));
 	mcp->mb[7] = LSW(MSD(req_dma));
 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[4] = MSW(size);
 		mcp->mb[5] = LSW(size);
 		mcp->out_mb |= MBX_5|MBX_4;
@@ -2932,13 +2970,13 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
 	mcp->in_mb = MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
-	rval = qla2x00_mailbox_command(ha, mcp);
+	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
-		    ha->host_no, rval, mcp->mb[0]));
+		    vha->host_no, rval, mcp->mb[0]));
 	} else {
-		DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+		DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
 	}
 
 	return rval;
@@ -2954,20 +2992,21 @@ struct cs84xx_mgmt_cmd {
 };
 
 int
-qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
+qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
 {
 	int rval, retry;
 	struct cs84xx_mgmt_cmd *mn;
 	dma_addr_t mn_dma;
 	uint16_t options;
 	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
 
-	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+	DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
 	if (mn == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
-		    "IOCB.\n", __func__, ha->host_no));
+		    "IOCB.\n", __func__, vha->host_no));
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
 
@@ -2986,19 +3025,19 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
 	mn->p.req.options = cpu_to_le16(options);
 
 	DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
-	    ha->host_no));
+	    vha->host_no));
 	DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
 	    sizeof(*mn)));
 
-	rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120);
+	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2_16(printk("%s(%ld): failed to issue Verify "
-		    "IOCB (%x).\n", __func__, ha->host_no, rval));
+		    "IOCB (%x).\n", __func__, vha->host_no, rval));
 		goto verify_done;
 	}
 
 	DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
-	    ha->host_no));
+	    vha->host_no));
 	DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
 	    sizeof(*mn)));
 
@@ -3006,21 +3045,21 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
 	status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
 	    le16_to_cpu(mn->p.rsp.failure_code) : 0;
 	DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
-	    ha->host_no, status[0], status[1]));
+	    vha->host_no, status[0], status[1]));
 
 	if (status[0] != CS_COMPLETE) {
 		rval = QLA_FUNCTION_FAILED;
 		if (!(options & VCO_DONT_UPDATE_FW)) {
 			DEBUG2_16(printk("%s(%ld): Firmware update "
 			    "failed. Retrying without update "
-			    "firmware.\n", __func__, ha->host_no));
+			    "firmware.\n", __func__, vha->host_no));
 			options |= VCO_DONT_UPDATE_FW;
 			options &= ~VCO_FORCE_UPDATE;
 			retry = 1;
 		}
 	} else {
 		DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
-		    __func__, ha->host_no,
+		    __func__, vha->host_no,
 		    le32_to_cpu(mn->p.rsp.fw_ver)));
 
 		/* NOTE: we only update OP firmware. */
@@ -3037,10 +3076,115 @@ verify_done:
3037 3076
3038 if (rval != QLA_SUCCESS) { 3077 if (rval != QLA_SUCCESS) {
3039 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3078 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
3040 ha->host_no, rval)); 3079 vha->host_no, rval));
3041 } else { 3080 } else {
3042 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3081 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
3082 }
3083
3084 return rval;
3085}
3086
3087int
3088qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req,
3089 uint8_t options)
3090{
3091 int rval;
3092 unsigned long flags;
3093 mbx_cmd_t mc;
3094 mbx_cmd_t *mcp = &mc;
3095 struct device_reg_25xxmq __iomem *reg;
3096 struct qla_hw_data *ha = vha->hw;
3097
3098 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3099 mcp->mb[1] = options;
3100 mcp->mb[2] = MSW(LSD(req->dma));
3101 mcp->mb[3] = LSW(LSD(req->dma));
3102 mcp->mb[6] = MSW(MSD(req->dma));
3103 mcp->mb[7] = LSW(MSD(req->dma));
3104 mcp->mb[5] = req->length;
3105 if (req->rsp)
3106 mcp->mb[10] = req->rsp->id;
3107 mcp->mb[12] = req->qos;
3108 mcp->mb[11] = req->vp_idx;
3109 mcp->mb[13] = req->rid;
3110
3111 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3112 QLA_QUE_PAGE * req->id);
3113
3114 mcp->mb[4] = req->id;
3115 /* que in ptr index */
3116 mcp->mb[8] = 0;
3117 /* que out ptr index */
3118 mcp->mb[9] = 0;
3119 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3120 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3121 mcp->in_mb = MBX_0;
3122 mcp->flags = MBX_DMA_OUT;
3123 mcp->tov = 60;
3124
3125 spin_lock_irqsave(&ha->hardware_lock, flags);
3126 if (!(options & BIT_0)) {
3127 WRT_REG_DWORD(&reg->req_q_in, 0);
3128 WRT_REG_DWORD(&reg->req_q_out, 0);
3129 }
3130 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3131
3132 rval = qla2x00_mailbox_command(vha, mcp);
3133 if (rval != QLA_SUCCESS)
3134 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
3135 __func__, vha->host_no, rval, mcp->mb[0]));
3136 return rval;
3137}
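
[Editor's note] The mailbox setup above packs the 64-bit request-queue ring DMA address into four 16-bit mailbox registers. A minimal sketch of what the LSD/MSD/LSW/MSW word-split macros are presumed to expand to (illustrative definitions only; the driver's own macros live in its headers):

	/* Illustrative only -- assumed semantics of the word-split macros
	 * used by qla25xx_init_req_que() above. */
	#include <stdint.h>

	#define LSD(x)  ((uint32_t)((uint64_t)(x) & 0xffffffffULL)) /* low 32 bits  */
	#define MSD(x)  ((uint32_t)(((uint64_t)(x)) >> 32))         /* high 32 bits */
	#define LSW(x)  ((uint16_t)((x) & 0xffff))                  /* low 16 bits  */
	#define MSW(x)  ((uint16_t)(((x) >> 16) & 0xffff))          /* high 16 bits */

	/* mb[2]=MSW(LSD(dma))  mb[3]=LSW(LSD(dma))
	 * mb[6]=MSW(MSD(dma))  mb[7]=LSW(MSD(dma)) */
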
3138
3139int
3140qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
3141 uint8_t options)
3142{
3143 int rval;
3144 unsigned long flags;
3145 mbx_cmd_t mc;
3146 mbx_cmd_t *mcp = &mc;
3147 struct device_reg_25xxmq __iomem *reg;
3148 struct qla_hw_data *ha = vha->hw;
3149
3150 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3151 mcp->mb[1] = options;
3152 mcp->mb[2] = MSW(LSD(rsp->dma));
3153 mcp->mb[3] = LSW(LSD(rsp->dma));
3154 mcp->mb[6] = MSW(MSD(rsp->dma));
3155 mcp->mb[7] = LSW(MSD(rsp->dma));
3156 mcp->mb[5] = rsp->length;
3157 mcp->mb[11] = rsp->vp_idx;
3158 mcp->mb[14] = rsp->msix->vector;
3159 mcp->mb[13] = rsp->rid;
3160
3161 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3162 QLA_QUE_PAGE * rsp->id);
3163
3164 mcp->mb[4] = rsp->id;
3165 /* que in ptr index */
3166 mcp->mb[8] = 0;
3167 /* que out ptr index */
3168 mcp->mb[9] = 0;
3169 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
3170 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3171 mcp->in_mb = MBX_0;
3172 mcp->flags = MBX_DMA_OUT;
3173 mcp->tov = 60;
3174
3175 spin_lock_irqsave(&ha->hardware_lock, flags);
3176 if (!(options & BIT_0)) {
3177 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3178 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3043 } 3179 }
3044 3180
3181 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3182
3183 rval = qla2x00_mailbox_command(vha, mcp);
3184 if (rval != QLA_SUCCESS)
3185 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
3186 "mb0=%x.\n", __func__,
3187 vha->host_no, rval, mcp->mb[0]));
3045 return rval; 3188 return rval;
3046} 3189}
3190
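
[Editor's note] Both init routines above issue the same MBC_INITIALIZE_MULTIQ mailbox command. The register layout they use, collected from the code as a reading aid (not an authoritative firmware spec):

	/*
	 * MBC_INITIALIZE_MULTIQ register usage as issued above:
	 *   mb[0]  = MBC_INITIALIZE_MULTIQ           mb[1]  = options
	 *   mb[2..3], mb[6..7] = queue ring DMA address (word split above)
	 *   mb[4]  = queue id                        mb[5]  = ring length
	 *   mb[8]  = in-pointer index (0)            mb[9]  = out-pointer index (0)
	 *   mb[10] = paired rsp queue id (req only)  mb[11] = vp_idx
	 *   mb[12] = qos (req only)                  mb[13] = rid
	 *   mb[14] = MSI-X vector (rsp only)
	 */
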
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 93560cd72784..386ffeae5b5a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -18,7 +19,7 @@
18void 19void
19qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 20qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
20{ 21{
21 if (vha->parent && vha->timer_active) { 22 if (vha->vp_idx && vha->timer_active) {
22 del_timer_sync(&vha->timer); 23 del_timer_sync(&vha->timer);
23 vha->timer_active = 0; 24 vha->timer_active = 0;
24 } 25 }
@@ -28,7 +29,7 @@ static uint32_t
28qla24xx_allocate_vp_id(scsi_qla_host_t *vha) 29qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
29{ 30{
30 uint32_t vp_id; 31 uint32_t vp_id;
31 scsi_qla_host_t *ha = vha->parent; 32 struct qla_hw_data *ha = vha->hw;
32 33
33 /* Find an empty slot and assign an vp_id */ 34 /* Find an empty slot and assign an vp_id */
34 mutex_lock(&ha->vport_lock); 35 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 ha->num_vhosts++; 45 ha->num_vhosts++;
45 ha->cur_vport_count++; 46 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
47 list_add_tail(&vha->vp_list, &ha->vp_list); 48 list_add_tail(&vha->list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 49 mutex_unlock(&ha->vport_lock);
49 return vp_id; 50 return vp_id;
50} 51}
@@ -53,24 +54,24 @@ void
53qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) 54qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 55{
55 uint16_t vp_id; 56 uint16_t vp_id;
56 scsi_qla_host_t *ha = vha->parent; 57 struct qla_hw_data *ha = vha->hw;
57 58
58 mutex_lock(&ha->vport_lock); 59 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 60 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 61 ha->num_vhosts--;
61 ha->cur_vport_count--; 62 ha->cur_vport_count--;
62 clear_bit(vp_id, ha->vp_idx_map); 63 clear_bit(vp_id, ha->vp_idx_map);
63 list_del(&vha->vp_list); 64 list_del(&vha->list);
64 mutex_unlock(&ha->vport_lock); 65 mutex_unlock(&ha->vport_lock);
65} 66}
66 67
67static scsi_qla_host_t * 68static scsi_qla_host_t *
68qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name) 69qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
69{ 70{
70 scsi_qla_host_t *vha; 71 scsi_qla_host_t *vha;
71 72
72 /* Locate matching device in database. */ 73 /* Locate matching device in database. */
73 list_for_each_entry(vha, &ha->vp_list, vp_list) { 74 list_for_each_entry(vha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 75 if (!memcmp(port_name, vha->port_name, WWN_SIZE))
75 return vha; 76 return vha;
76 } 77 }
@@ -94,16 +95,13 @@ static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 95qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 96{
96 fc_port_t *fcport; 97 fc_port_t *fcport;
97 scsi_qla_host_t *pha = to_qla_parent(vha);
98
99 list_for_each_entry(fcport, &pha->fcports, list) {
100 if (fcport->vp_idx != vha->vp_idx)
101 continue;
102 98
99 list_for_each_entry(fcport, &vha->vp_fcports, list) {
103 DEBUG15(printk("scsi(%ld): Marking port dead, " 100 DEBUG15(printk("scsi(%ld): Marking port dead, "
104 "loop_id=0x%04x :%x\n", 101 "loop_id=0x%04x :%x\n",
105 vha->host_no, fcport->loop_id, fcport->vp_idx)); 102 vha->host_no, fcport->loop_id, fcport->vp_idx));
106 103
104 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
107 qla2x00_mark_device_lost(vha, fcport, 0, 0); 105 qla2x00_mark_device_lost(vha, fcport, 0, 0);
108 atomic_set(&fcport->state, FCS_UNCONFIGURED); 106 atomic_set(&fcport->state, FCS_UNCONFIGURED);
109 } 107 }
@@ -118,7 +116,6 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
118 atomic_set(&vha->loop_state, LOOP_DOWN); 116 atomic_set(&vha->loop_state, LOOP_DOWN);
119 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 117 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
120 118
121 /* Delete all vp's fcports from parent's list */
122 qla2x00_mark_vp_devices_dead(vha); 119 qla2x00_mark_vp_devices_dead(vha);
123 atomic_set(&vha->vp_state, VP_FAILED); 120 atomic_set(&vha->vp_state, VP_FAILED);
124 vha->flags.management_server_logged_in = 0; 121 vha->flags.management_server_logged_in = 0;
@@ -135,11 +132,12 @@ int
135qla24xx_enable_vp(scsi_qla_host_t *vha) 132qla24xx_enable_vp(scsi_qla_host_t *vha)
136{ 133{
137 int ret; 134 int ret;
138 scsi_qla_host_t *ha = vha->parent; 135 struct qla_hw_data *ha = vha->hw;
136 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
139 137
140 /* Check if physical ha port is Up */ 138 /* Check if physical ha port is Up */
141 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 139 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
142 atomic_read(&ha->loop_state) == LOOP_DEAD ) { 140 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
143 vha->vp_err_state = VP_ERR_PORTDWN; 141 vha->vp_err_state = VP_ERR_PORTDWN;
144 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); 142 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
145 goto enable_failed; 143 goto enable_failed;
@@ -177,8 +175,8 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
177 vha->host_no, __func__)); 175 vha->host_no, __func__));
178 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 176 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
179 if (ret != QLA_SUCCESS) { 177 if (ret != QLA_SUCCESS) {
180 DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving" 178 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
181 " of RSCN requests: 0x%x\n", ret)); 179 "receiving of RSCN requests: 0x%x\n", ret));
182 return; 180 return;
183 } else { 181 } else {
184 /* Corresponds to SCR enabled */ 182 /* Corresponds to SCR enabled */
@@ -194,25 +192,14 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194} 192}
195 193
196void 194void
197qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb) 195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
198{ 196{
199 int i, vp_idx_matched;
200 scsi_qla_host_t *vha; 197 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw;
199 int i = 0;
201 200
202 if (ha->parent) 201 list_for_each_entry(vha, &ha->vp_list, list) {
203 return; 202 if (vha->vp_idx) {
204
205 for_each_mapped_vp_idx(ha, i) {
206 vp_idx_matched = 0;
207
208 list_for_each_entry(vha, &ha->vp_list, vp_list) {
209 if (i == vha->vp_idx) {
210 vp_idx_matched = 1;
211 break;
212 }
213 }
214
215 if (vp_idx_matched) {
216 switch (mb[0]) { 203 switch (mb[0]) {
217 case MBA_LIP_OCCURRED: 204 case MBA_LIP_OCCURRED:
218 case MBA_LOOP_UP: 205 case MBA_LOOP_UP:
@@ -223,16 +210,17 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
223 case MBA_PORT_UPDATE: 210 case MBA_PORT_UPDATE:
224 case MBA_RSCN_UPDATE: 211 case MBA_RSCN_UPDATE:
225 DEBUG15(printk("scsi(%ld)%s: Async_event for" 212 DEBUG15(printk("scsi(%ld)%s: Async_event for"
226 " VP[%d], mb = 0x%x, vha=%p\n", 213 " VP[%d], mb = 0x%x, vha=%p\n",
227 vha->host_no, __func__,i, *mb, vha)); 214 vha->host_no, __func__, i, *mb, vha));
228 qla2x00_async_event(vha, mb); 215 qla2x00_async_event(vha, rsp, mb);
229 break; 216 break;
230 } 217 }
231 } 218 }
219 i++;
232 } 220 }
233} 221}
234 222
235void 223int
236qla2x00_vp_abort_isp(scsi_qla_host_t *vha) 224qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
237{ 225{
238 /* 226 /*
@@ -247,38 +235,56 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
247 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 235 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
248 } 236 }
249 237
238 /* To exclusively reset vport, we need to log it out first.*/
239 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
240 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
241
250 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 242 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
251 vha->host_no, vha->vp_idx)); 243 vha->host_no, vha->vp_idx));
252 qla24xx_enable_vp(vha); 244 return qla24xx_enable_vp(vha);
253} 245}
254 246
255static int 247static int
256qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 248qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
257{ 249{
258 scsi_qla_host_t *ha = vha->parent; 250 struct qla_hw_data *ha = vha->hw;
251 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
259 252
260 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 253 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
261 /* VP acquired. complete port configuration */ 254 /* VP acquired. complete port configuration */
262 if (atomic_read(&ha->loop_state) == LOOP_READY) { 255 if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
263 qla24xx_configure_vp(vha); 256 qla24xx_configure_vp(vha);
264 } else { 257 } else {
265 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 258 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
266 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 259 set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
267 } 260 }
268 261
269 return 0; 262 return 0;
270 } 263 }
271 264
272 if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 265 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
273 qla2x00_vp_abort_isp(vha); 266 qla2x00_update_fcports(vha);
267 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
268 }
269
270 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
271 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
272 atomic_read(&vha->loop_state) != LOOP_DOWN) {
273
274 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
275 vha->host_no));
276 qla2x00_relogin(vha);
277
278 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
279 vha->host_no));
280 }
274 281
275 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 282 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
276 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { 283 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
277 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 284 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
278 } 285 }
279 286
280 if (atomic_read(&vha->vp_state) == VP_ACTIVE && 287 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
281 test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
282 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 288 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
283 qla2x00_loop_resync(vha); 289 qla2x00_loop_resync(vha);
284 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 290 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -289,38 +295,30 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
289} 295}
290 296
291void 297void
292qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha) 298qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
293{ 299{
294 int ret; 300 int ret;
295 int i, vp_idx_matched; 301 struct qla_hw_data *ha = vha->hw;
296 scsi_qla_host_t *vha; 302 scsi_qla_host_t *vp;
297 303
298 if (ha->parent) 304 if (vha->vp_idx)
299 return; 305 return;
300 if (list_empty(&ha->vp_list)) 306 if (list_empty(&ha->vp_list))
301 return; 307 return;
302 308
303 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags); 309 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
304
305 for_each_mapped_vp_idx(ha, i) {
306 vp_idx_matched = 0;
307
308 list_for_each_entry(vha, &ha->vp_list, vp_list) {
309 if (i == vha->vp_idx) {
310 vp_idx_matched = 1;
311 break;
312 }
313 }
314 310
315 if (vp_idx_matched) 311 list_for_each_entry(vp, &ha->vp_list, list) {
316 ret = qla2x00_do_dpc_vp(vha); 312 if (vp->vp_idx)
313 ret = qla2x00_do_dpc_vp(vp);
317 } 314 }
318} 315}
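
[Editor's note] With the parent pointer gone, the patch distinguishes the physical host from NPIV vports by vp_idx alone (0 for the base host). A minimal sketch of the iteration contract the two loops above rely on:

	/* Sketch: walk every NPIV vport on the hw list, skipping the
	 * physical (base) host. Assumes vp_idx == 0 identifies the base
	 * host, as in the code above. */
	struct scsi_qla_host *vp;

	list_for_each_entry(vp, &ha->vp_list, list) {
		if (!vp->vp_idx)        /* base host, not a vport */
			continue;
		/* ... per-vport work ... */
	}
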
319 316
320int 317int
321qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) 318qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
322{ 319{
323 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 320 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
321 struct qla_hw_data *ha = base_vha->hw;
324 scsi_qla_host_t *vha; 322 scsi_qla_host_t *vha;
325 uint8_t port_name[WWN_SIZE]; 323 uint8_t port_name[WWN_SIZE];
326 324
@@ -337,7 +335,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
337 335
338 /* Check up unique WWPN */ 336 /* Check up unique WWPN */
339 u64_to_wwn(fc_vport->port_name, port_name); 337 u64_to_wwn(fc_vport->port_name, port_name);
340 if (!memcmp(port_name, ha->port_name, WWN_SIZE)) 338 if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
341 return VPCERR_BAD_WWN; 339 return VPCERR_BAD_WWN;
342 vha = qla24xx_find_vhost_by_name(ha, port_name); 340 vha = qla24xx_find_vhost_by_name(ha, port_name);
343 if (vha) 341 if (vha)
@@ -346,7 +344,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
346 /* Check up max-npiv-supports */ 344 /* Check up max-npiv-supports */
347 if (ha->num_vhosts > ha->max_npiv_vports) { 345 if (ha->num_vhosts > ha->max_npiv_vports) {
348 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 346 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
349 "max_npv_vports %ud.\n", ha->host_no, 347 "max_npv_vports %ud.\n", base_vha->host_no,
350 ha->num_vhosts, ha->max_npiv_vports)); 348 ha->num_vhosts, ha->max_npiv_vports));
351 return VPCERR_UNSUPPORTED; 349 return VPCERR_UNSUPPORTED;
352 } 350 }
@@ -356,59 +354,34 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
356scsi_qla_host_t * 354scsi_qla_host_t *
357qla24xx_create_vhost(struct fc_vport *fc_vport) 355qla24xx_create_vhost(struct fc_vport *fc_vport)
358{ 356{
359 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 357 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
358 struct qla_hw_data *ha = base_vha->hw;
360 scsi_qla_host_t *vha; 359 scsi_qla_host_t *vha;
360 struct scsi_host_template *sht = &qla24xx_driver_template;
361 struct Scsi_Host *host; 361 struct Scsi_Host *host;
362 362
363 host = scsi_host_alloc(&qla24xx_driver_template, 363 vha = qla2x00_create_host(sht, ha);
364 sizeof(scsi_qla_host_t)); 364 if (!vha) {
365 if (!host) { 365 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
366 printk(KERN_WARNING
367 "qla2xxx: scsi_host_alloc() failed for vport\n");
368 return(NULL); 366 return(NULL);
369 } 367 }
370 368
371 vha = shost_priv(host); 369 host = vha->host;
372
373 /* clone the parent hba */
374 memcpy(vha, ha, sizeof (scsi_qla_host_t));
375
376 fc_vport->dd_data = vha; 370 fc_vport->dd_data = vha;
377
378 vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
379 if (!vha->node_name)
380 goto create_vhost_failed_1;
381
382 vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
383 if (!vha->port_name)
384 goto create_vhost_failed_2;
385
386 /* New host info */ 371 /* New host info */
387 u64_to_wwn(fc_vport->node_name, vha->node_name); 372 u64_to_wwn(fc_vport->node_name, vha->node_name);
388 u64_to_wwn(fc_vport->port_name, vha->port_name); 373 u64_to_wwn(fc_vport->port_name, vha->port_name);
389 374
390 vha->host = host;
391 vha->host_no = host->host_no;
392 vha->parent = ha;
393 vha->fc_vport = fc_vport; 375 vha->fc_vport = fc_vport;
394 vha->device_flags = 0; 376 vha->device_flags = 0;
395 vha->vp_idx = qla24xx_allocate_vp_id(vha); 377 vha->vp_idx = qla24xx_allocate_vp_id(vha);
396 if (vha->vp_idx > ha->max_npiv_vports) { 378 if (vha->vp_idx > ha->max_npiv_vports) {
397 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 379 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
398 vha->host_no)); 380 vha->host_no));
399 goto create_vhost_failed_3; 381 goto create_vhost_failed;
400 } 382 }
401 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 383 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
402 384
403 init_completion(&vha->mbx_cmd_comp);
404 complete(&vha->mbx_cmd_comp);
405 init_completion(&vha->mbx_intr_comp);
406
407 INIT_LIST_HEAD(&vha->list);
408 INIT_LIST_HEAD(&vha->fcports);
409 INIT_LIST_HEAD(&vha->vp_fcports);
410 INIT_LIST_HEAD(&vha->work_list);
411
412 vha->dpc_flags = 0L; 385 vha->dpc_flags = 0L;
413 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 386 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
414 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 387 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -423,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
423 396
424 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 397 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
425 398
426 host->can_queue = vha->request_q_length + 128; 399 memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES);
400 vha->req_ques[0] = ha->req_q_map[0]->id;
401 host->can_queue = ha->req_q_map[0]->length + 128;
427 host->this_id = 255; 402 host->this_id = 255;
428 host->cmd_per_lun = 3; 403 host->cmd_per_lun = 3;
429 host->max_cmd_len = MAX_CMDSZ; 404 host->max_cmd_len = MAX_CMDSZ;
@@ -440,12 +415,341 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
440 415
441 return vha; 416 return vha;
442 417
443create_vhost_failed_3: 418create_vhost_failed:
444 kfree(vha->port_name); 419 return NULL;
420}
445 421
446create_vhost_failed_2: 422static void
447 kfree(vha->node_name); 423qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
424{
425 struct qla_hw_data *ha = vha->hw;
426 uint16_t que_id = req->id;
427
428 dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
429 sizeof(request_t), req->ring, req->dma);
430 req->ring = NULL;
431 req->dma = 0;
432 if (que_id) {
433 ha->req_q_map[que_id] = NULL;
434 mutex_lock(&ha->vport_lock);
435 clear_bit(que_id, ha->req_qid_map);
436 mutex_unlock(&ha->vport_lock);
437 }
438 kfree(req);
439 req = NULL;
440}
448 441
449create_vhost_failed_1: 442static void
450 return NULL; 443qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
444{
445 struct qla_hw_data *ha = vha->hw;
446 uint16_t que_id = rsp->id;
447
448 if (rsp->msix && rsp->msix->have_irq) {
449 free_irq(rsp->msix->vector, rsp);
450 rsp->msix->have_irq = 0;
451 rsp->msix->rsp = NULL;
452 }
453 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
454 sizeof(response_t), rsp->ring, rsp->dma);
455 rsp->ring = NULL;
456 rsp->dma = 0;
457 if (que_id) {
458 ha->rsp_q_map[que_id] = NULL;
459 mutex_lock(&ha->vport_lock);
460 clear_bit(que_id, ha->rsp_qid_map);
461 mutex_unlock(&ha->vport_lock);
462 }
463 kfree(rsp);
464 rsp = NULL;
465}
466
467int
468qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
469{
470 int ret = -1;
471
472 if (req) {
473 req->options |= BIT_0;
474 ret = qla25xx_init_req_que(vha, req, req->options);
475 }
476 if (ret == QLA_SUCCESS)
477 qla25xx_free_req_que(vha, req);
478
479 return ret;
480}
481
482int
483qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
484{
485 int ret = -1;
486
487 if (rsp) {
488 rsp->options |= BIT_0;
489 ret = qla25xx_init_rsp_que(vha, rsp, rsp->options);
490 }
491 if (ret == QLA_SUCCESS)
492 qla25xx_free_rsp_que(vha, rsp);
493
494 return ret;
495}
496
497int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
498{
499 int ret = 0;
500 struct qla_hw_data *ha = vha->hw;
501 struct req_que *req = ha->req_q_map[que];
502
503 req->options |= BIT_3;
504 req->qos = qos;
505 ret = qla25xx_init_req_que(vha, req, req->options);
506 if (ret != QLA_SUCCESS)
507 DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
508 /* restore options bit */
509 req->options &= ~BIT_3;
510 return ret;
511}
512
513
514/* Delete all queues for a given vhost */
515int
516qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
517{
518 int cnt, ret = 0;
519 struct req_que *req = NULL;
520 struct rsp_que *rsp = NULL;
521 struct qla_hw_data *ha = vha->hw;
522
523 if (que_no) {
524 /* Delete request queue */
525 req = ha->req_q_map[que_no];
526 if (req) {
527 rsp = req->rsp;
528 ret = qla25xx_delete_req_que(vha, req);
529 if (ret != QLA_SUCCESS) {
530 qla_printk(KERN_WARNING, ha,
531 "Couldn't delete req que %d\n", req->id);
532 return ret;
533 }
534 /* Delete associated response queue */
535 if (rsp) {
536 ret = qla25xx_delete_rsp_que(vha, rsp);
537 if (ret != QLA_SUCCESS) {
538 qla_printk(KERN_WARNING, ha,
539 "Couldn't delete rsp que %d\n",
540 rsp->id);
541 return ret;
542 }
543 }
544 }
545 } else { /* delete all queues of this host */
546 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
547 /* Delete request queues */
548 req = ha->req_q_map[vha->req_ques[cnt]];
549 if (req && req->id) {
550 rsp = req->rsp;
551 ret = qla25xx_delete_req_que(vha, req);
552 if (ret != QLA_SUCCESS) {
553 qla_printk(KERN_WARNING, ha,
554 "Couldn't delete req que %d\n",
555 vha->req_ques[cnt]);
556 return ret;
557 }
558 vha->req_ques[cnt] = ha->req_q_map[0]->id;
559 /* Delete associated response queue */
560 if (rsp && rsp->id) {
561 ret = qla25xx_delete_rsp_que(vha, rsp);
562 if (ret != QLA_SUCCESS) {
563 qla_printk(KERN_WARNING, ha,
564 "Couldn't delete rsp que %d\n",
565 rsp->id);
566 return ret;
567 }
568 }
569 }
570 }
571 }
572 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
573 vha->vp_idx);
574 return ret;
575}
576
577int
578qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
579 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
580{
581 int ret = 0;
582 struct req_que *req = NULL;
583 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
584 uint16_t que_id = 0;
585
586 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
587 if (req == NULL) {
588 qla_printk(KERN_WARNING, ha, "could not allocate memory"
589 "for request que\n");
590 goto que_failed;
591 }
592
593 req->length = REQUEST_ENTRY_CNT_24XX;
594 req->ring = dma_alloc_coherent(&ha->pdev->dev,
595 (req->length + 1) * sizeof(request_t),
596 &req->dma, GFP_KERNEL);
597 if (req->ring == NULL) {
598 qla_printk(KERN_WARNING, ha,
599 "Memory Allocation failed - request_ring\n");
600 goto que_failed;
601 }
602
603 mutex_lock(&ha->vport_lock);
604 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
605 if (que_id >= ha->max_queues) {
606 mutex_unlock(&ha->vport_lock);
607 qla_printk(KERN_INFO, ha, "No resources to create "
608 "additional request queue\n");
609 goto que_failed;
610 }
611 set_bit(que_id, ha->req_qid_map);
612 ha->req_q_map[que_id] = req;
613 req->rid = rid;
614 req->vp_idx = vp_idx;
615 req->qos = qos;
616
617 if (ha->rsp_q_map[rsp_que])
618 req->rsp = ha->rsp_q_map[rsp_que];
619 /* Use alternate PCI bus number */
620 if (MSB(req->rid))
621 options |= BIT_4;
622 /* Use alternate PCI devfn */
623 if (LSB(req->rid))
624 options |= BIT_5;
625 req->options = options;
626 req->ring_ptr = req->ring;
627 req->ring_index = 0;
628 req->cnt = req->length;
629 req->id = que_id;
630 mutex_unlock(&ha->vport_lock);
631
632 ret = qla25xx_init_req_que(base_vha, req, options);
633 if (ret != QLA_SUCCESS) {
634 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
635 mutex_lock(&ha->vport_lock);
636 clear_bit(que_id, ha->req_qid_map);
637 mutex_unlock(&ha->vport_lock);
638 goto que_failed;
639 }
640
641 return req->id;
642
643que_failed:
644 qla25xx_free_req_que(base_vha, req);
645 return 0;
646}
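
[Editor's note] qla25xx_create_req_que() claims a queue id from a bitmap under vport_lock and clears the bit again if firmware initialization fails. The same pattern, reduced to its core (a sketch; assumes max_queues bounds the bitmap, as above):

	/* Sketch of the id-allocation pattern used above. */
	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
	if (que_id >= ha->max_queues) {          /* bitmap exhausted */
		mutex_unlock(&ha->vport_lock);
		return 0;                        /* 0 never names a new queue */
	}
	set_bit(que_id, ha->req_qid_map);
	mutex_unlock(&ha->vport_lock);

	/* ... init queue with firmware; on failure: */
	mutex_lock(&ha->vport_lock);
	clear_bit(que_id, ha->req_qid_map);      /* roll back the reservation */
	mutex_unlock(&ha->vport_lock);
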
647
648/* create response queue */
649int
650qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
651 uint8_t vp_idx, uint16_t rid)
652{
653 int ret = 0;
654 struct rsp_que *rsp = NULL;
655 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
656	uint16_t que_id = 0;
657
658 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
659 if (rsp == NULL) {
660 qla_printk(KERN_WARNING, ha, "could not allocate memory for"
661 " response que\n");
662 goto que_failed;
663 }
664
665 rsp->length = RESPONSE_ENTRY_CNT_2300;
666 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
667 (rsp->length + 1) * sizeof(response_t),
668 &rsp->dma, GFP_KERNEL);
669 if (rsp->ring == NULL) {
670 qla_printk(KERN_WARNING, ha,
671 "Memory Allocation failed - response_ring\n");
672 goto que_failed;
673 }
674
675 mutex_lock(&ha->vport_lock);
676 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
677 if (que_id >= ha->max_queues) {
678 mutex_unlock(&ha->vport_lock);
679 qla_printk(KERN_INFO, ha, "No resources to create "
680 "additional response queue\n");
681 goto que_failed;
682 }
683 set_bit(que_id, ha->rsp_qid_map);
684
685 if (ha->flags.msix_enabled)
686 rsp->msix = &ha->msix_entries[que_id + 1];
687 else
688 qla_printk(KERN_WARNING, ha, "msix not enabled\n");
689
690 ha->rsp_q_map[que_id] = rsp;
691 rsp->rid = rid;
692 rsp->vp_idx = vp_idx;
693 rsp->hw = ha;
694 /* Use alternate PCI bus number */
695 if (MSB(rsp->rid))
696 options |= BIT_4;
697 /* Use alternate PCI devfn */
698 if (LSB(rsp->rid))
699 options |= BIT_5;
700 rsp->options = options;
701 rsp->ring_ptr = rsp->ring;
702 rsp->ring_index = 0;
703 rsp->id = que_id;
704 mutex_unlock(&ha->vport_lock);
705
706 ret = qla25xx_request_irq(rsp);
707 if (ret)
708 goto que_failed;
709
710 ret = qla25xx_init_rsp_que(base_vha, rsp, options);
711 if (ret != QLA_SUCCESS) {
712 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
713 mutex_lock(&ha->vport_lock);
714 clear_bit(que_id, ha->rsp_qid_map);
715 mutex_unlock(&ha->vport_lock);
716 goto que_failed;
717 }
718
719 qla2x00_init_response_q_entries(rsp);
720
721 return rsp->id;
722
723que_failed:
724 qla25xx_free_rsp_que(base_vha, rsp);
725 return 0;
726}
727
728int
729qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
730{
731 uint16_t options = 0;
732 uint8_t ret = 0;
733 struct qla_hw_data *ha = vha->hw;
734
735 options |= BIT_1;
736 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
737 if (!ret) {
738 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
739 return ret;
740 } else
741 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
742
743 options = 0;
744 if (qos & BIT_7)
745 options |= BIT_8;
746 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
747 qos & ~BIT_7);
748 if (ret) {
749 vha->req_ques[0] = ret;
750 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
751 } else
752 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
753
754 return ret;
451} 755}
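
[Editor's note] Putting the pieces together: a vport gets its queue pair by creating the response queue first and feeding its id into the request-queue create; bit 7 of the qos byte is translated into option BIT_8. A hedged usage sketch of the flow implemented by qla25xx_create_queues() above (the qos value is illustrative):

	/* Sketch: per-vport queue-pair creation as wired up above. */
	uint8_t rsp_id, req_id, qos = 0x05;      /* example qos, not from the patch */

	rsp_id = qla25xx_create_rsp_que(ha, BIT_1, vha->vp_idx, 0);
	if (rsp_id)
		req_id = qla25xx_create_req_que(ha, (qos & BIT_7) ? BIT_8 : 0,
				vha->vp_idx, 0, rsp_id, qos & ~BIT_7);
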
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 35567203ef61..8ea927788b3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings " 92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94 94
95 95int ql2xmaxqueues = 1;
96module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
97MODULE_PARM_DESC(ql2xmaxqueues,
98 "Enables MQ settings "
99 "Default is 1 for single queue. Set it to number \
100 of queues in MQ mode.");
96/* 101/*
97 * SCSI host template entry points 102 * SCSI host template entry points
98 */ 103 */
@@ -183,42 +188,108 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
183 */ 188 */
184 189
185__inline__ void 190__inline__ void
186qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 191qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
187{ 192{
188 init_timer(&ha->timer); 193 init_timer(&vha->timer);
189 ha->timer.expires = jiffies + interval * HZ; 194 vha->timer.expires = jiffies + interval * HZ;
190 ha->timer.data = (unsigned long)ha; 195 vha->timer.data = (unsigned long)vha;
191 ha->timer.function = (void (*)(unsigned long))func; 196 vha->timer.function = (void (*)(unsigned long))func;
192 add_timer(&ha->timer); 197 add_timer(&vha->timer);
193 ha->timer_active = 1; 198 vha->timer_active = 1;
194} 199}
195 200
196static inline void 201static inline void
197qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 202qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
198{ 203{
199 mod_timer(&ha->timer, jiffies + interval * HZ); 204 mod_timer(&vha->timer, jiffies + interval * HZ);
200} 205}
201 206
202static __inline__ void 207static __inline__ void
203qla2x00_stop_timer(scsi_qla_host_t *ha) 208qla2x00_stop_timer(scsi_qla_host_t *vha)
204{ 209{
205 del_timer_sync(&ha->timer); 210 del_timer_sync(&vha->timer);
206 ha->timer_active = 0; 211 vha->timer_active = 0;
207} 212}
208 213
209static int qla2x00_do_dpc(void *data); 214static int qla2x00_do_dpc(void *data);
210 215
211static void qla2x00_rst_aen(scsi_qla_host_t *); 216static void qla2x00_rst_aen(scsi_qla_host_t *);
212 217
213static int qla2x00_mem_alloc(scsi_qla_host_t *); 218static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
214static void qla2x00_mem_free(scsi_qla_host_t *ha); 219 struct req_que **, struct rsp_que **);
215static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 220static void qla2x00_mem_free(struct qla_hw_data *);
221static void qla2x00_sp_free_dma(srb_t *);
216 222
217/* -------------------------------------------------------------------------- */ 223/* -------------------------------------------------------------------------- */
224static int qla2x00_alloc_queues(struct qla_hw_data *ha)
225{
226 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
227 GFP_KERNEL);
228 if (!ha->req_q_map) {
229 qla_printk(KERN_WARNING, ha,
230 "Unable to allocate memory for request queue ptrs\n");
231 goto fail_req_map;
232 }
233
234 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
235 GFP_KERNEL);
236 if (!ha->rsp_q_map) {
237 qla_printk(KERN_WARNING, ha,
238 "Unable to allocate memory for response queue ptrs\n");
239 goto fail_rsp_map;
240 }
241 set_bit(0, ha->rsp_qid_map);
242 set_bit(0, ha->req_qid_map);
243 return 1;
244
245fail_rsp_map:
246 kfree(ha->req_q_map);
247 ha->req_q_map = NULL;
248fail_req_map:
249 return -ENOMEM;
250}
251
252static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
253 struct rsp_que *rsp)
254{
255 if (rsp && rsp->ring)
256 dma_free_coherent(&ha->pdev->dev,
257 (rsp->length + 1) * sizeof(response_t),
258 rsp->ring, rsp->dma);
259
260 kfree(rsp);
261 rsp = NULL;
262 if (req && req->ring)
263 dma_free_coherent(&ha->pdev->dev,
264 (req->length + 1) * sizeof(request_t),
265 req->ring, req->dma);
266
267 kfree(req);
268 req = NULL;
269}
270
271static void qla2x00_free_queues(struct qla_hw_data *ha)
272{
273 struct req_que *req;
274 struct rsp_que *rsp;
275 int cnt;
276
277 for (cnt = 0; cnt < ha->max_queues; cnt++) {
278 rsp = ha->rsp_q_map[cnt];
279 req = ha->req_q_map[cnt];
280 qla2x00_free_que(ha, req, rsp);
281 }
282 kfree(ha->rsp_q_map);
283 ha->rsp_q_map = NULL;
284
285 kfree(ha->req_q_map);
286 ha->req_q_map = NULL;
287}
218 288
219static char * 289static char *
220qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 290qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
221{ 291{
292 struct qla_hw_data *ha = vha->hw;
222 static char *pci_bus_modes[] = { 293 static char *pci_bus_modes[] = {
223 "33", "66", "100", "133", 294 "33", "66", "100", "133",
224 }; 295 };
@@ -240,9 +311,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
240} 311}
241 312
242static char * 313static char *
243qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 314qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
244{ 315{
245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 316 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
317 struct qla_hw_data *ha = vha->hw;
246 uint32_t pci_bus; 318 uint32_t pci_bus;
247 int pcie_reg; 319 int pcie_reg;
248 320
@@ -290,9 +362,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
290} 362}
291 363
292static char * 364static char *
293qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 365qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
294{ 366{
295 char un_str[10]; 367 char un_str[10];
368 struct qla_hw_data *ha = vha->hw;
296 369
297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 370 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
298 ha->fw_minor_version, 371 ha->fw_minor_version,
@@ -328,8 +401,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
328} 401}
329 402
330static char * 403static char *
331qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
332{ 405{
406 struct qla_hw_data *ha = vha->hw;
333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 407 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
334 ha->fw_minor_version, 408 ha->fw_minor_version,
335 ha->fw_subminor_version); 409 ha->fw_subminor_version);
@@ -354,18 +428,20 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
354} 428}
355 429
356static inline srb_t * 430static inline srb_t *
357qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 431qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 432 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
359{ 433{
360 srb_t *sp; 434 srb_t *sp;
435 struct qla_hw_data *ha = vha->hw;
361 436
362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 437 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
363 if (!sp) 438 if (!sp)
364 return sp; 439 return sp;
365 440
366 sp->ha = ha; 441 sp->vha = vha;
367 sp->fcport = fcport; 442 sp->fcport = fcport;
368 sp->cmd = cmd; 443 sp->cmd = cmd;
444 sp->que = ha->req_q_map[0];
369 sp->flags = 0; 445 sp->flags = 0;
370 CMD_SP(cmd) = (void *)sp; 446 CMD_SP(cmd) = (void *)sp;
371 cmd->scsi_done = done; 447 cmd->scsi_done = done;
@@ -376,9 +452,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
376static int 452static int
377qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 453qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
378{ 454{
379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 455 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 456 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 457 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
458 struct qla_hw_data *ha = vha->hw;
382 srb_t *sp; 459 srb_t *sp;
383 int rval; 460 int rval;
384 461
@@ -399,33 +476,33 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
399 476
400 if (atomic_read(&fcport->state) != FCS_ONLINE) { 477 if (atomic_read(&fcport->state) != FCS_ONLINE) {
401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 478 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
402 atomic_read(&ha->loop_state) == LOOP_DEAD) { 479 atomic_read(&vha->loop_state) == LOOP_DEAD) {
403 cmd->result = DID_NO_CONNECT << 16; 480 cmd->result = DID_NO_CONNECT << 16;
404 goto qc_fail_command; 481 goto qc_fail_command;
405 } 482 }
406 goto qc_target_busy; 483 goto qc_target_busy;
407 } 484 }
408 485
409 spin_unlock_irq(ha->host->host_lock); 486 spin_unlock_irq(vha->host->host_lock);
410 487
411 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 488 sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
412 if (!sp) 489 if (!sp)
413 goto qc_host_busy_lock; 490 goto qc_host_busy_lock;
414 491
415 rval = qla2x00_start_scsi(sp); 492 rval = ha->isp_ops->start_scsi(sp);
416 if (rval != QLA_SUCCESS) 493 if (rval != QLA_SUCCESS)
417 goto qc_host_busy_free_sp; 494 goto qc_host_busy_free_sp;
418 495
419 spin_lock_irq(ha->host->host_lock); 496 spin_lock_irq(vha->host->host_lock);
420 497
421 return 0; 498 return 0;
422 499
423qc_host_busy_free_sp: 500qc_host_busy_free_sp:
424 qla2x00_sp_free_dma(ha, sp); 501 qla2x00_sp_free_dma(sp);
425 mempool_free(sp, ha->srb_mempool); 502 mempool_free(sp, ha->srb_mempool);
426 503
427qc_host_busy_lock: 504qc_host_busy_lock:
428 spin_lock_irq(ha->host->host_lock); 505 spin_lock_irq(vha->host->host_lock);
429 return SCSI_MLQUEUE_HOST_BUSY; 506 return SCSI_MLQUEUE_HOST_BUSY;
430 507
431qc_target_busy: 508qc_target_busy:
@@ -441,14 +518,15 @@ qc_fail_command:
441static int 518static int
442qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 519qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443{ 520{
444 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 521 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
445 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 522 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
446 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 523 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
524 struct qla_hw_data *ha = vha->hw;
525 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
447 srb_t *sp; 526 srb_t *sp;
448 int rval; 527 int rval;
449 scsi_qla_host_t *pha = to_qla_parent(ha);
450 528
451 if (unlikely(pci_channel_offline(pha->pdev))) { 529 if (unlikely(pci_channel_offline(ha->pdev))) {
452 cmd->result = DID_REQUEUE << 16; 530 cmd->result = DID_REQUEUE << 16;
453 goto qc24_fail_command; 531 goto qc24_fail_command;
454 } 532 }
@@ -465,33 +543,33 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
465 543
466 if (atomic_read(&fcport->state) != FCS_ONLINE) { 544 if (atomic_read(&fcport->state) != FCS_ONLINE) {
467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 545 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
468 atomic_read(&pha->loop_state) == LOOP_DEAD) { 546 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
469 cmd->result = DID_NO_CONNECT << 16; 547 cmd->result = DID_NO_CONNECT << 16;
470 goto qc24_fail_command; 548 goto qc24_fail_command;
471 } 549 }
472 goto qc24_target_busy; 550 goto qc24_target_busy;
473 } 551 }
474 552
475 spin_unlock_irq(ha->host->host_lock); 553 spin_unlock_irq(vha->host->host_lock);
476 554
477 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 555 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
478 if (!sp) 556 if (!sp)
479 goto qc24_host_busy_lock; 557 goto qc24_host_busy_lock;
480 558
481 rval = qla24xx_start_scsi(sp); 559 rval = ha->isp_ops->start_scsi(sp);
482 if (rval != QLA_SUCCESS) 560 if (rval != QLA_SUCCESS)
483 goto qc24_host_busy_free_sp; 561 goto qc24_host_busy_free_sp;
484 562
485 spin_lock_irq(ha->host->host_lock); 563 spin_lock_irq(vha->host->host_lock);
486 564
487 return 0; 565 return 0;
488 566
489qc24_host_busy_free_sp: 567qc24_host_busy_free_sp:
490 qla2x00_sp_free_dma(pha, sp); 568 qla2x00_sp_free_dma(sp);
491 mempool_free(sp, pha->srb_mempool); 569 mempool_free(sp, ha->srb_mempool);
492 570
493qc24_host_busy_lock: 571qc24_host_busy_lock:
494 spin_lock_irq(ha->host->host_lock); 572 spin_lock_irq(vha->host->host_lock);
495 return SCSI_MLQUEUE_HOST_BUSY; 573 return SCSI_MLQUEUE_HOST_BUSY;
496 574
497qc24_target_busy: 575qc24_target_busy:
@@ -510,17 +588,14 @@ qc24_fail_command:
510 * max time. 588 * max time.
511 * 589 *
512 * Input: 590 * Input:
513 * ha = actual ha whose done queue will contain the command
514 * returned by firmware.
515 * cmd = Scsi Command to wait on. 591 * cmd = Scsi Command to wait on.
516 * flag = Abort/Reset(Bus or Device Reset)
517 * 592 *
518 * Return: 593 * Return:
519 * Not Found : 0 594 * Not Found : 0
520 * Found : 1 595 * Found : 1
521 */ 596 */
522static int 597static int
523qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 598qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
524{ 599{
525#define ABORT_POLLING_PERIOD 1000 600#define ABORT_POLLING_PERIOD 1000
526#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 601#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -557,21 +632,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
557 * Failed (Adapter is offline/disabled) : 1 632 * Failed (Adapter is offline/disabled) : 1
558 */ 633 */
559int 634int
560qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 635qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
561{ 636{
562 int return_status; 637 int return_status;
563 unsigned long wait_online; 638 unsigned long wait_online;
564 scsi_qla_host_t *pha = to_qla_parent(ha); 639 struct qla_hw_data *ha = vha->hw;
640 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
565 641
566 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 642 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
567 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 643 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
568 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 644 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
569 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 645 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
570 pha->dpc_active) && time_before(jiffies, wait_online)) { 646 ha->dpc_active) && time_before(jiffies, wait_online)) {
571 647
572 msleep(1000); 648 msleep(1000);
573 } 649 }
574 if (pha->flags.online) 650 if (base_vha->flags.online)
575 return_status = QLA_SUCCESS; 651 return_status = QLA_SUCCESS;
576 else 652 else
577 return_status = QLA_FUNCTION_FAILED; 653 return_status = QLA_FUNCTION_FAILED;
@@ -596,19 +672,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
596 * Failed (LOOP_NOT_READY) : 1 672 * Failed (LOOP_NOT_READY) : 1
597 */ 673 */
598static inline int 674static inline int
599qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 675qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
600{ 676{
601 int return_status = QLA_SUCCESS; 677 int return_status = QLA_SUCCESS;
602 unsigned long loop_timeout ; 678 unsigned long loop_timeout ;
603 scsi_qla_host_t *pha = to_qla_parent(ha); 679 struct qla_hw_data *ha = vha->hw;
680 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
604 681
605 /* wait for 5 min at the max for loop to be ready */ 682 /* wait for 5 min at the max for loop to be ready */
606 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 683 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
607 684
608 while ((!atomic_read(&pha->loop_down_timer) && 685 while ((!atomic_read(&base_vha->loop_down_timer) &&
609 atomic_read(&pha->loop_state) == LOOP_DOWN) || 686 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
610 atomic_read(&pha->loop_state) != LOOP_READY) { 687 atomic_read(&base_vha->loop_state) != LOOP_READY) {
611 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 688 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
612 return_status = QLA_FUNCTION_FAILED; 689 return_status = QLA_FUNCTION_FAILED;
613 break; 690 break;
614 } 691 }
@@ -624,35 +701,42 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
624void 701void
625qla2x00_abort_fcport_cmds(fc_port_t *fcport) 702qla2x00_abort_fcport_cmds(fc_port_t *fcport)
626{ 703{
627 int cnt; 704 int cnt, que, id;
628 unsigned long flags; 705 unsigned long flags;
629 srb_t *sp; 706 srb_t *sp;
630 scsi_qla_host_t *ha = fcport->ha; 707 scsi_qla_host_t *vha = fcport->vha;
631 scsi_qla_host_t *pha = to_qla_parent(ha); 708 struct qla_hw_data *ha = vha->hw;
709 struct req_que *req;
632 710
633 spin_lock_irqsave(&pha->hardware_lock, flags); 711 spin_lock_irqsave(&ha->hardware_lock, flags);
634 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 712 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
635 sp = pha->outstanding_cmds[cnt]; 713 id = vha->req_ques[que];
636 if (!sp) 714 req = ha->req_q_map[id];
637 continue; 715 if (!req)
638 if (sp->fcport != fcport)
639 continue; 716 continue;
717 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
718 sp = req->outstanding_cmds[cnt];
719 if (!sp)
720 continue;
721 if (sp->fcport != fcport)
722 continue;
640 723
641 spin_unlock_irqrestore(&pha->hardware_lock, flags); 724 spin_unlock_irqrestore(&ha->hardware_lock, flags);
642 if (ha->isp_ops->abort_command(ha, sp)) { 725 if (ha->isp_ops->abort_command(vha, sp, req)) {
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed -- %lx\n", sp->cmd->serial_number));
645 } else {
646 if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
647 QLA_SUCCESS)
648 DEBUG2(qla_printk(KERN_WARNING, ha, 726 DEBUG2(qla_printk(KERN_WARNING, ha,
649 "Abort failed while waiting -- %lx\n", 727 "Abort failed -- %lx\n",
650 sp->cmd->serial_number)); 728 sp->cmd->serial_number));
651 729 } else {
730 if (qla2x00_eh_wait_on_command(sp->cmd) !=
731 QLA_SUCCESS)
732 DEBUG2(qla_printk(KERN_WARNING, ha,
733 "Abort failed while waiting -- %lx\n",
734 sp->cmd->serial_number));
735 }
736 spin_lock_irqsave(&ha->hardware_lock, flags);
652 } 737 }
653 spin_lock_irqsave(&pha->hardware_lock, flags);
654 } 738 }
655 spin_unlock_irqrestore(&pha->hardware_lock, flags); 739 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656} 740}
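
[Editor's note] The abort loop above releases hardware_lock before issuing the abort mailbox command (which may sleep) and re-takes it before continuing the scan. The shape of that pattern, isolated as a sketch — issue_abort() is a hypothetical placeholder, and revalidating the array after re-locking is the caller's responsibility:

	/* Sketch: drop a spinlock around a blocking call inside an array scan. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (!sp || sp->fcport != fcport)
			continue;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		issue_abort(sp);        /* may sleep; hypothetical helper */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* note: outstanding_cmds[] may have changed while unlocked */
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
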
657 741
658static void 742static void
@@ -690,14 +774,16 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
690static int 774static int
691qla2xxx_eh_abort(struct scsi_cmnd *cmd) 775qla2xxx_eh_abort(struct scsi_cmnd *cmd)
692{ 776{
693 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 777 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
694 srb_t *sp; 778 srb_t *sp;
695 int ret, i; 779 int ret, i;
696 unsigned int id, lun; 780 unsigned int id, lun;
697 unsigned long serial; 781 unsigned long serial;
698 unsigned long flags; 782 unsigned long flags;
699 int wait = 0; 783 int wait = 0;
700 scsi_qla_host_t *pha = to_qla_parent(ha); 784 struct qla_hw_data *ha = vha->hw;
785 struct req_que *req;
786 srb_t *spt;
701 787
702 qla2x00_block_error_handler(cmd); 788 qla2x00_block_error_handler(cmd);
703 789
@@ -709,11 +795,15 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 id = cmd->device->id; 795 id = cmd->device->id;
710 lun = cmd->device->lun; 796 lun = cmd->device->lun;
711 serial = cmd->serial_number; 797 serial = cmd->serial_number;
798 spt = (srb_t *) CMD_SP(cmd);
799 if (!spt)
800 return SUCCESS;
801 req = spt->que;
712 802
713 	/* Check active list for the command. */ 803 	/* Check active list for the command. */
714 spin_lock_irqsave(&pha->hardware_lock, flags); 804 spin_lock_irqsave(&ha->hardware_lock, flags);
715 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 805 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
716 sp = pha->outstanding_cmds[i]; 806 sp = req->outstanding_cmds[i];
717 807
718 if (sp == NULL) 808 if (sp == NULL)
719 continue; 809 continue;
@@ -721,38 +811,36 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
721 if (sp->cmd != cmd) 811 if (sp->cmd != cmd)
722 continue; 812 continue;
723 813
724 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 814 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
725 __func__, ha->host_no, sp, serial)); 815 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
726 816
727 spin_unlock_irqrestore(&pha->hardware_lock, flags); 817 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728 if (ha->isp_ops->abort_command(ha, sp)) { 818 if (ha->isp_ops->abort_command(vha, sp, req)) {
729 DEBUG2(printk("%s(%ld): abort_command " 819 DEBUG2(printk("%s(%ld): abort_command "
730 "mbx failed.\n", __func__, ha->host_no)); 820 "mbx failed.\n", __func__, vha->host_no));
731 ret = FAILED;
732 } else { 821 } else {
733 DEBUG3(printk("%s(%ld): abort_command " 822 DEBUG3(printk("%s(%ld): abort_command "
734 "mbx success.\n", __func__, ha->host_no)); 823 "mbx success.\n", __func__, vha->host_no));
735 wait = 1; 824 wait = 1;
736 } 825 }
737 spin_lock_irqsave(&pha->hardware_lock, flags); 826 spin_lock_irqsave(&ha->hardware_lock, flags);
738
739 break; 827 break;
740 } 828 }
741 spin_unlock_irqrestore(&pha->hardware_lock, flags); 829 spin_unlock_irqrestore(&ha->hardware_lock, flags);
742 830
743 /* Wait for the command to be returned. */ 831 /* Wait for the command to be returned. */
744 if (wait) { 832 if (wait) {
745 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 833 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
746 qla_printk(KERN_ERR, ha, 834 qla_printk(KERN_ERR, ha,
747 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 835 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
748 "%x.\n", ha->host_no, id, lun, serial, ret); 836 "%x.\n", vha->host_no, id, lun, serial, ret);
749 ret = FAILED; 837 ret = FAILED;
750 } 838 }
751 } 839 }
752 840
753 qla_printk(KERN_INFO, ha, 841 qla_printk(KERN_INFO, ha,
754 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 842 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
755 ha->host_no, id, lun, wait, serial, ret); 843 vha->host_no, id, lun, wait, serial, ret);
756 844
757 return ret; 845 return ret;
758} 846}
@@ -764,23 +852,27 @@ enum nexus_wait_type {
764}; 852};
765 853
766static int 854static int
767qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 855qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
768 unsigned int l, enum nexus_wait_type type) 856 unsigned int l, srb_t *sp, enum nexus_wait_type type)
769{ 857{
770 int cnt, match, status; 858 int cnt, match, status;
771 srb_t *sp;
772 unsigned long flags; 859 unsigned long flags;
773 scsi_qla_host_t *pha = to_qla_parent(ha); 860 struct qla_hw_data *ha = vha->hw;
861 struct req_que *req;
774 862
775 status = QLA_SUCCESS; 863 status = QLA_SUCCESS;
776 spin_lock_irqsave(&pha->hardware_lock, flags); 864 if (!sp)
777 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 865 return status;
778 cnt++) { 866
779 sp = pha->outstanding_cmds[cnt]; 867 spin_lock_irqsave(&ha->hardware_lock, flags);
868 req = sp->que;
869 for (cnt = 1; status == QLA_SUCCESS &&
870 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
871 sp = req->outstanding_cmds[cnt];
780 if (!sp) 872 if (!sp)
781 continue; 873 continue;
782 874
783 if (ha->vp_idx != sp->fcport->ha->vp_idx) 875 if (vha->vp_idx != sp->fcport->vha->vp_idx)
784 continue; 876 continue;
785 match = 0; 877 match = 0;
786 switch (type) { 878 switch (type) {
@@ -792,17 +884,17 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
792 break; 884 break;
793 case WAIT_LUN: 885 case WAIT_LUN:
794 match = (sp->cmd->device->id == t && 886 match = (sp->cmd->device->id == t &&
795 sp->cmd->device->lun == l); 887 sp->cmd->device->lun == l);
796 break; 888 break;
797 } 889 }
798 if (!match) 890 if (!match)
799 continue; 891 continue;
800 892
801 spin_unlock_irqrestore(&pha->hardware_lock, flags); 893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 894 status = qla2x00_eh_wait_on_command(sp->cmd);
803 spin_lock_irqsave(&pha->hardware_lock, flags); 895 spin_lock_irqsave(&ha->hardware_lock, flags);
804 } 896 }
805 spin_unlock_irqrestore(&pha->hardware_lock, flags); 897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806 898
807 return status; 899 return status;
808} 900}
@@ -818,7 +910,7 @@ static int
818__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 910__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
819 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 911 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
820{ 912{
821 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 913 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
822 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 914 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
823 int err; 915 int err;
824 916
@@ -827,31 +919,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
827 if (!fcport) 919 if (!fcport)
828 return FAILED; 920 return FAILED;
829 921
830 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 922 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
831 ha->host_no, cmd->device->id, cmd->device->lun, name); 923 vha->host_no, cmd->device->id, cmd->device->lun, name);
832 924
833 err = 0; 925 err = 0;
834 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 926 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
835 goto eh_reset_failed; 927 goto eh_reset_failed;
836 err = 1; 928 err = 1;
837 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 929 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
838 goto eh_reset_failed; 930 goto eh_reset_failed;
839 err = 2; 931 err = 2;
840 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 932 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
841 goto eh_reset_failed; 933 goto eh_reset_failed;
842 err = 3; 934 err = 3;
843 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 935 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
844 cmd->device->lun, type) != QLA_SUCCESS) 936 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
845 goto eh_reset_failed; 937 goto eh_reset_failed;
846 938
847 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 939 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
848 ha->host_no, cmd->device->id, cmd->device->lun, name); 940 vha->host_no, cmd->device->id, cmd->device->lun, name);
849 941
850 return SUCCESS; 942 return SUCCESS;
851 943
852 eh_reset_failed: 944 eh_reset_failed:
853 	qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 945 	qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
854 	    ha->host_no, cmd->device->id, cmd->device->lun, name, 946 	    vha->host_no, cmd->device->id, cmd->device->lun, name,
855 reset_errors[err]); 947 reset_errors[err]);
856 return FAILED; 948 return FAILED;
857} 949}
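
The generic reset helper above escalates through four gates (HBA online, loop ready, the reset IOCB itself, and a drain of pending commands), recording how far it got in err so the failure message can name the stage. A standalone sketch of that ladder follows; the stub callbacks, messages, and names are illustrative only, not the driver's API.

#include <stdio.h>

enum { SUCCESS, FAILED };

/* indexed by how far the ladder got before failing */
static const char *reset_errors[] = {
	"HBA not online", "loop not ready", "reset IOCB failed",
	"pending commands did not drain",
};

/* each step is a stub returning 0 on failure, nonzero on success */
static int generic_reset(int (*hba_online)(void), int (*loop_ready)(void),
			 int (*do_reset)(void), int (*wait_pending)(void))
{
	int err = 0;

	if (!hba_online())
		goto eh_reset_failed;
	err = 1;
	if (!loop_ready())
		goto eh_reset_failed;
	err = 2;
	if (!do_reset())
		goto eh_reset_failed;
	err = 3;
	if (!wait_pending())
		goto eh_reset_failed;

	printf("RESET SUCCEEDED\n");
	return SUCCESS;

eh_reset_failed:
	printf("RESET FAILED: %s\n", reset_errors[err]);
	return FAILED;
}

static int yes(void) { return 1; }
static int no(void)  { return 0; }

int main(void)
{
	generic_reset(yes, yes, no, yes);	/* fails at the reset step, err == 2 */
	return 0;
}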
@@ -859,7 +951,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
859static int 951static int
860qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 952qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
861{ 953{
862 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 954 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
955 struct qla_hw_data *ha = vha->hw;
863 956
864 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 957 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
865 ha->isp_ops->lun_reset); 958 ha->isp_ops->lun_reset);
@@ -868,7 +961,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
868static int 961static int
869qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 962qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
870{ 963{
871 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 964 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
965 struct qla_hw_data *ha = vha->hw;
872 966
873 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 967 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
874 ha->isp_ops->target_reset); 968 ha->isp_ops->target_reset);
@@ -892,12 +986,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
892static int 986static int
893qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 987qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
894{ 988{
895 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 989 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
896 scsi_qla_host_t *pha = to_qla_parent(ha);
897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 990 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
898 int ret = FAILED; 991 int ret = FAILED;
899 unsigned int id, lun; 992 unsigned int id, lun;
900 unsigned long serial; 993 unsigned long serial;
994 srb_t *sp = (srb_t *) CMD_SP(cmd);
901 995
902 qla2x00_block_error_handler(cmd); 996 qla2x00_block_error_handler(cmd);
903 997
@@ -908,28 +1002,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
908 if (!fcport) 1002 if (!fcport)
909 return ret; 1003 return ret;
910 1004
911 qla_printk(KERN_INFO, ha, 1005 qla_printk(KERN_INFO, vha->hw,
912 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 1006 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
913 1007
914 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 1008 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
915 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1009 DEBUG2(printk("%s failed:board disabled\n",__func__));
916 goto eh_bus_reset_done; 1010 goto eh_bus_reset_done;
917 } 1011 }
918 1012
919 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 1013 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
920 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 1014 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
921 ret = SUCCESS; 1015 ret = SUCCESS;
922 } 1016 }
923 if (ret == FAILED) 1017 if (ret == FAILED)
924 goto eh_bus_reset_done; 1018 goto eh_bus_reset_done;
925 1019
926 /* Flush outstanding commands. */ 1020 /* Flush outstanding commands. */
927 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 1021 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
928 QLA_SUCCESS) 1022 QLA_SUCCESS)
929 ret = FAILED; 1023 ret = FAILED;
930 1024
931eh_bus_reset_done: 1025eh_bus_reset_done:
932 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1026 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
933 (ret == FAILED) ? "failed" : "succeeded"); 1027 (ret == FAILED) ? "failed" : "succeeded");
934 1028
935 return ret; 1029 return ret;
@@ -953,12 +1047,14 @@ eh_bus_reset_done:
953static int 1047static int
954qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1048qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
955{ 1049{
956 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 1050 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
957 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1051 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1052 struct qla_hw_data *ha = vha->hw;
958 int ret = FAILED; 1053 int ret = FAILED;
959 unsigned int id, lun; 1054 unsigned int id, lun;
960 unsigned long serial; 1055 unsigned long serial;
961 scsi_qla_host_t *pha = to_qla_parent(ha); 1056 srb_t *sp = (srb_t *) CMD_SP(cmd);
1057 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
962 1058
963 qla2x00_block_error_handler(cmd); 1059 qla2x00_block_error_handler(cmd);
964 1060
@@ -970,9 +1066,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
970 return ret; 1066 return ret;
971 1067
972 qla_printk(KERN_INFO, ha, 1068 qla_printk(KERN_INFO, ha,
973 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 1069 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
974 1070
975 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 1071 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
976 goto eh_host_reset_lock; 1072 goto eh_host_reset_lock;
977 1073
978 /* 1074 /*
@@ -983,26 +1079,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
983 * devices as lost kicking off the port_down_timer 1079 * devices as lost kicking off the port_down_timer
984 * while dpc is stuck for the mailbox to complete. 1080 * while dpc is stuck for the mailbox to complete.
985 */ 1081 */
986 qla2x00_wait_for_loop_ready(ha); 1082 qla2x00_wait_for_loop_ready(vha);
987 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 1083 if (vha != base_vha) {
988 if (qla2x00_abort_isp(pha)) { 1084 if (qla2x00_vp_abort_isp(vha))
989 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
990 /* failed. schedule dpc to try */
991 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
992
993 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
994 goto eh_host_reset_lock; 1085 goto eh_host_reset_lock;
1086 } else {
1087 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1088 if (qla2x00_abort_isp(base_vha)) {
1089 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1090 /* failed. schedule dpc to try */
1091 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1092
1093 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1094 goto eh_host_reset_lock;
1095 }
1096 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
995 } 1097 }
996 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
997 1098
998 /* Waiting for our command in done_queue to be returned to OS.*/ 1099 /* Waiting for command to be returned to OS.*/
999 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 1100 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
1000 QLA_SUCCESS) 1101 QLA_SUCCESS)
1001 ret = SUCCESS; 1102 ret = SUCCESS;
1002 1103
1003 if (ha->parent)
1004 qla2x00_vp_abort_isp(ha);
1005
1006eh_host_reset_lock: 1104eh_host_reset_lock:
1007 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1105 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1008 (ret == FAILED) ? "failed" : "succeeded"); 1106 (ret == FAILED) ? "failed" : "succeeded");
@@ -1021,35 +1119,36 @@ eh_host_reset_lock:
1021* 0 = success 1119* 0 = success
1022*/ 1120*/
1023int 1121int
1024qla2x00_loop_reset(scsi_qla_host_t *ha) 1122qla2x00_loop_reset(scsi_qla_host_t *vha)
1025{ 1123{
1026 int ret; 1124 int ret;
1027 struct fc_port *fcport; 1125 struct fc_port *fcport;
1126 struct qla_hw_data *ha = vha->hw;
1028 1127
1029 if (ha->flags.enable_lip_full_login) { 1128 if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
1030 ret = qla2x00_full_login_lip(ha); 1129 ret = qla2x00_full_login_lip(vha);
1031 if (ret != QLA_SUCCESS) { 1130 if (ret != QLA_SUCCESS) {
1032 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1131 DEBUG2_3(printk("%s(%ld): failed: "
1033 "full_login_lip=%d.\n", __func__, ha->host_no, 1132 "full_login_lip=%d.\n", __func__, vha->host_no,
1034 ret)); 1133 ret));
1035 } 1134 }
1036 atomic_set(&ha->loop_state, LOOP_DOWN); 1135 atomic_set(&vha->loop_state, LOOP_DOWN);
1037 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 1136 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1038 qla2x00_mark_all_devices_lost(ha, 0); 1137 qla2x00_mark_all_devices_lost(vha, 0);
1039 qla2x00_wait_for_loop_ready(ha); 1138 qla2x00_wait_for_loop_ready(vha);
1040 } 1139 }
1041 1140
1042 if (ha->flags.enable_lip_reset) { 1141 if (ha->flags.enable_lip_reset && !vha->vp_idx) {
1043 ret = qla2x00_lip_reset(ha); 1142 ret = qla2x00_lip_reset(vha);
1044 if (ret != QLA_SUCCESS) { 1143 if (ret != QLA_SUCCESS) {
1045 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1144 DEBUG2_3(printk("%s(%ld): failed: "
1046 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1145 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1047 } 1146 } else
1048 qla2x00_wait_for_loop_ready(ha); 1147 qla2x00_wait_for_loop_ready(vha);
1049 } 1148 }
1050 1149
1051 if (ha->flags.enable_target_reset) { 1150 if (ha->flags.enable_target_reset) {
1052 list_for_each_entry(fcport, &ha->fcports, list) { 1151 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1053 if (fcport->port_type != FCT_TARGET) 1152 if (fcport->port_type != FCT_TARGET)
1054 continue; 1153 continue;
1055 1154
@@ -1057,31 +1156,37 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1057 if (ret != QLA_SUCCESS) { 1156 if (ret != QLA_SUCCESS) {
1058 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1157 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1059 "target_reset=%d d_id=%x.\n", __func__, 1158 "target_reset=%d d_id=%x.\n", __func__,
1060 ha->host_no, ret, fcport->d_id.b24)); 1159 vha->host_no, ret, fcport->d_id.b24));
1061 } 1160 }
1062 } 1161 }
1063 } 1162 }
1064
1065 /* Issue marker command only when we are going to start the I/O */ 1163 /* Issue marker command only when we are going to start the I/O */
1066 ha->marker_needed = 1; 1164 vha->marker_needed = 1;
1067 1165
1068 return QLA_SUCCESS; 1166 return QLA_SUCCESS;
1069} 1167}
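
With the vport rework, only the physical port (vp_idx == 0) may drive the link-level LIP operations, while the per-target stage iterates the vport's own vp_fcports list. A toy model of that gating; the structures and flag names are simplified stand-ins, not the driver's types.

#include <stdio.h>

struct vport { int vp_idx; };		/* 0 == physical port */
struct hwflags { int lip_full_login, lip_reset, target_reset; };

static void loop_reset(const struct vport *vha, const struct hwflags *f)
{
	if (f->lip_full_login && !vha->vp_idx)
		printf("full-login LIP on physical port\n");
	if (f->lip_reset && !vha->vp_idx)
		printf("LIP reset on physical port\n");
	if (f->target_reset)
		printf("target reset for each fcport of vp %d\n", vha->vp_idx);
}

int main(void)
{
	struct hwflags f = { 1, 1, 1 };
	struct vport phys = { 0 }, npiv = { 2 };

	loop_reset(&phys, &f);	/* all three stages */
	loop_reset(&npiv, &f);	/* only the per-target stage */
	return 0;
}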
1070 1168
1071void 1169void
1072qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1170qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1073{ 1171{
1074 int cnt; 1172 int que, cnt;
1075 unsigned long flags; 1173 unsigned long flags;
1076 srb_t *sp; 1174 srb_t *sp;
1175 struct qla_hw_data *ha = vha->hw;
1176 struct req_que *req;
1077 1177
1078 spin_lock_irqsave(&ha->hardware_lock, flags); 1178 spin_lock_irqsave(&ha->hardware_lock, flags);
1079 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1179 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
1080 sp = ha->outstanding_cmds[cnt]; 1180 req = ha->req_q_map[vha->req_ques[que]];
1081 if (sp) { 1181 if (!req)
1082 ha->outstanding_cmds[cnt] = NULL; 1182 continue;
1083 sp->cmd->result = res; 1183 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1084 qla2x00_sp_compl(ha, sp); 1184 sp = req->outstanding_cmds[cnt];
1185 if (sp && sp->vha == vha) {
1186 req->outstanding_cmds[cnt] = NULL;
1187 sp->cmd->result = res;
1188 qla2x00_sp_compl(ha, sp);
1189 }
1085 } 1190 }
1086 } 1191 }
1087 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1192 spin_unlock_irqrestore(&ha->hardware_lock, flags);
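
The abort path now walks each request queue mapped to the vport and completes only the srbs whose sp->vha matches, since outstanding commands from several vports can share the hardware's queues. A standalone model with simplified structures (slot 0 is skipped, as in the driver):

#include <stddef.h>
#include <stdio.h>

#define MAX_OUTSTANDING	8
#define MAX_HOST_QUES	2

struct srb { int owner; int result; };

struct req_que { struct srb *outstanding[MAX_OUTSTANDING]; };

/* complete (here: count) only the commands owned by vport `owner` */
static int abort_all_cmds(struct req_que *qmap[], int owner, int res)
{
	int que, cnt, done = 0;

	for (que = 0; que < MAX_HOST_QUES; que++) {
		struct req_que *req = qmap[que];
		if (!req)
			continue;
		for (cnt = 1; cnt < MAX_OUTSTANDING; cnt++) {
			struct srb *sp = req->outstanding[cnt];
			if (sp && sp->owner == owner) {
				req->outstanding[cnt] = NULL;
				sp->result = res;
				done++;
			}
		}
	}
	return done;
}

int main(void)
{
	struct srb a = { 0, 0 }, b = { 1, 0 };
	struct req_que q = { { NULL, &a, &b } };
	struct req_que *qmap[MAX_HOST_QUES] = { &q, NULL };

	printf("aborted %d\n", abort_all_cmds(qmap, 0, -1));	/* aborts only `a` */
	return 0;
}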
@@ -1103,13 +1208,15 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
1103static int 1208static int
1104qla2xxx_slave_configure(struct scsi_device *sdev) 1209qla2xxx_slave_configure(struct scsi_device *sdev)
1105{ 1210{
1106 scsi_qla_host_t *ha = shost_priv(sdev->host); 1211 scsi_qla_host_t *vha = shost_priv(sdev->host);
1212 struct qla_hw_data *ha = vha->hw;
1107 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1213 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1214 struct req_que *req = ha->req_q_map[0];
1108 1215
1109 if (sdev->tagged_supported) 1216 if (sdev->tagged_supported)
1110 scsi_activate_tcq(sdev, ha->max_q_depth); 1217 scsi_activate_tcq(sdev, req->max_q_depth);
1111 else 1218 else
1112 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1219 scsi_deactivate_tcq(sdev, req->max_q_depth);
1113 1220
1114 rport->dev_loss_tmo = ha->port_down_retry_count; 1221 rport->dev_loss_tmo = ha->port_down_retry_count;
1115 1222
@@ -1152,8 +1259,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1152 * supported addressing method. 1259 * supported addressing method.
1153 */ 1260 */
1154static void 1261static void
1155qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1262qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
1156{ 1263{
1264 struct qla_hw_data *ha = vha->hw;
1157 /* Assume a 32bit DMA mask. */ 1265 /* Assume a 32bit DMA mask. */
1158 ha->flags.enable_64bit_addressing = 0; 1266 ha->flags.enable_64bit_addressing = 0;
1159 1267
@@ -1174,7 +1282,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1174} 1282}
1175 1283
1176static void 1284static void
1177qla2x00_enable_intrs(scsi_qla_host_t *ha) 1285qla2x00_enable_intrs(struct qla_hw_data *ha)
1178{ 1286{
1179 unsigned long flags = 0; 1287 unsigned long flags = 0;
1180 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1189,7 +1297,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
1189} 1297}
1190 1298
1191static void 1299static void
1192qla2x00_disable_intrs(scsi_qla_host_t *ha) 1300qla2x00_disable_intrs(struct qla_hw_data *ha)
1193{ 1301{
1194 unsigned long flags = 0; 1302 unsigned long flags = 0;
1195 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1303 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1203,7 +1311,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
1203} 1311}
1204 1312
1205static void 1313static void
1206qla24xx_enable_intrs(scsi_qla_host_t *ha) 1314qla24xx_enable_intrs(struct qla_hw_data *ha)
1207{ 1315{
1208 unsigned long flags = 0; 1316 unsigned long flags = 0;
1209 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1317 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1216,7 +1324,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
1216} 1324}
1217 1325
1218static void 1326static void
1219qla24xx_disable_intrs(scsi_qla_host_t *ha) 1327qla24xx_disable_intrs(struct qla_hw_data *ha)
1220{ 1328{
1221 unsigned long flags = 0; 1329 unsigned long flags = 0;
1222 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1260,6 +1368,10 @@ static struct isp_operations qla2100_isp_ops = {
1260 .read_optrom = qla2x00_read_optrom_data, 1368 .read_optrom = qla2x00_read_optrom_data,
1261 .write_optrom = qla2x00_write_optrom_data, 1369 .write_optrom = qla2x00_write_optrom_data,
1262 .get_flash_version = qla2x00_get_flash_version, 1370 .get_flash_version = qla2x00_get_flash_version,
1371 .start_scsi = qla2x00_start_scsi,
1372 .wrt_req_reg = NULL,
1373 .wrt_rsp_reg = NULL,
1374 .rd_req_reg = NULL,
1263}; 1375};
1264 1376
1265static struct isp_operations qla2300_isp_ops = { 1377static struct isp_operations qla2300_isp_ops = {
@@ -1294,6 +1406,10 @@ static struct isp_operations qla2300_isp_ops = {
1294 .read_optrom = qla2x00_read_optrom_data, 1406 .read_optrom = qla2x00_read_optrom_data,
1295 .write_optrom = qla2x00_write_optrom_data, 1407 .write_optrom = qla2x00_write_optrom_data,
1296 .get_flash_version = qla2x00_get_flash_version, 1408 .get_flash_version = qla2x00_get_flash_version,
1409 .start_scsi = qla2x00_start_scsi,
1410 .wrt_req_reg = NULL,
1411 .wrt_rsp_reg = NULL,
1412 .rd_req_reg = NULL,
1297}; 1413};
1298 1414
1299static struct isp_operations qla24xx_isp_ops = { 1415static struct isp_operations qla24xx_isp_ops = {
@@ -1328,6 +1444,10 @@ static struct isp_operations qla24xx_isp_ops = {
1328 .read_optrom = qla24xx_read_optrom_data, 1444 .read_optrom = qla24xx_read_optrom_data,
1329 .write_optrom = qla24xx_write_optrom_data, 1445 .write_optrom = qla24xx_write_optrom_data,
1330 .get_flash_version = qla24xx_get_flash_version, 1446 .get_flash_version = qla24xx_get_flash_version,
1447 .start_scsi = qla24xx_start_scsi,
1448 .wrt_req_reg = qla24xx_wrt_req_reg,
1449 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1450 .rd_req_reg = qla24xx_rd_req_reg,
1331}; 1451};
1332 1452
1333static struct isp_operations qla25xx_isp_ops = { 1453static struct isp_operations qla25xx_isp_ops = {
@@ -1362,10 +1482,14 @@ static struct isp_operations qla25xx_isp_ops = {
1362 .read_optrom = qla25xx_read_optrom_data, 1482 .read_optrom = qla25xx_read_optrom_data,
1363 .write_optrom = qla24xx_write_optrom_data, 1483 .write_optrom = qla24xx_write_optrom_data,
1364 .get_flash_version = qla24xx_get_flash_version, 1484 .get_flash_version = qla24xx_get_flash_version,
1485 .start_scsi = qla24xx_start_scsi,
1486 .wrt_req_reg = qla24xx_wrt_req_reg,
1487 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1488 .rd_req_reg = qla24xx_rd_req_reg,
1365}; 1489};
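
All four ops tables now carry start_scsi plus the three queue-register hooks, with the pre-24xx chips leaving the register hooks NULL; callers therefore have to test a hook before dispatching through it. A minimal sketch of NULL-tolerant ops-table dispatch under that convention (the names below are invented for illustration):

#include <stdio.h>

struct isp_operations {
	void (*start_scsi)(void);
	void (*wrt_req_reg)(int que, int index);	/* NULL on non-MQ chips */
};

static void generic_start(void) { printf("ring doorbell, single queue\n"); }
static void mq_start(void)      { printf("ring doorbell, multiqueue\n"); }
static void mq_wrt_req(int que, int index)
{
	printf("req-queue %d in-pointer = %d\n", que, index);
}

static const struct isp_operations qla2100_ops = { generic_start, NULL };
static const struct isp_operations qla25xx_ops = { mq_start, mq_wrt_req };

static void update_req_in(const struct isp_operations *ops, int que, int idx)
{
	if (ops->wrt_req_reg)		/* hook is optional */
		ops->wrt_req_reg(que, idx);
}

int main(void)
{
	update_req_in(&qla2100_ops, 0, 5);	/* silently skipped */
	update_req_in(&qla25xx_ops, 1, 5);
	return 0;
}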
1366 1490
1367static inline void 1491static inline void
1368qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1492qla2x00_set_isp_flags(struct qla_hw_data *ha)
1369{ 1493{
1370 ha->device_type = DT_EXTENDED_IDS; 1494 ha->device_type = DT_EXTENDED_IDS;
1371 switch (ha->pdev->device) { 1495 switch (ha->pdev->device) {
@@ -1447,9 +1571,10 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1447} 1571}
1448 1572
1449static int 1573static int
1450qla2x00_iospace_config(scsi_qla_host_t *ha) 1574qla2x00_iospace_config(struct qla_hw_data *ha)
1451{ 1575{
1452 resource_size_t pio; 1576 resource_size_t pio;
1577 uint16_t msix;
1453 1578
1454 if (pci_request_selected_regions(ha->pdev, ha->bars, 1579 if (pci_request_selected_regions(ha->pdev, ha->bars,
1455 QLA2XXX_DRIVER_NAME)) { 1580 QLA2XXX_DRIVER_NAME)) {
@@ -1502,6 +1627,30 @@ skip_pio:
1502 goto iospace_error_exit; 1627 goto iospace_error_exit;
1503 } 1628 }
1504 1629
1630 /* Determine queue resources */
1631 ha->max_queues = 1;
1632 if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
1633 goto mqiobase_exit;
1634 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1635 pci_resource_len(ha->pdev, 3));
1636 if (ha->mqiobase) {
1637 /* Read MSIX vector size of the board */
1638 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1639 ha->msix_count = msix;
1640 /* Max queues are bounded by available msix vectors */
1641 /* queue 0 uses two msix vectors */
1642 if (ha->msix_count - 1 < ql2xmaxqueues)
1643 ha->max_queues = ha->msix_count - 1;
1644 else if (ql2xmaxqueues > QLA_MQ_SIZE)
1645 ha->max_queues = QLA_MQ_SIZE;
1646 else
1647 ha->max_queues = ql2xmaxqueues;
1648 qla_printk(KERN_INFO, ha,
1649 "MSI-X vector count: %d\n", msix);
1650 }
1651
1652mqiobase_exit:
1653 ha->msix_count = ha->max_queues + 1;
1505 return (0); 1654 return (0);
1506 1655
1507iospace_error_exit: 1656iospace_error_exit:
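
The new block above sizes the multiqueue support: it maps BAR 3, reads the MSI-X vector count from PCI config space, and bounds the queue count by the vectors left after queue 0 (which consumes two) and by the ql2xmaxqueues module parameter. A standalone model of just that arithmetic; QLA_MQ_SIZE is assumed to be 32 here for the demo.

#include <stdio.h>

#define QLA_MQ_SIZE 32		/* assumed cap; see the driver headers */

static int max_queues(int msix_count, int ql2xmaxqueues)
{
	/* queue 0 consumes two vectors, so only msix_count - 1 queues fit */
	if (msix_count - 1 < ql2xmaxqueues)
		return msix_count - 1;
	if (ql2xmaxqueues > QLA_MQ_SIZE)
		return QLA_MQ_SIZE;
	return ql2xmaxqueues;
}

int main(void)
{
	/* e.g. a board reporting 8 vectors, module parameter set to 4 */
	int mq = max_queues(8, 4);

	printf("max_queues=%d msix_count=%d\n", mq, mq + 1);
	return 0;
}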
@@ -1511,25 +1660,25 @@ iospace_error_exit:
1511static void 1660static void
1512qla2xxx_scan_start(struct Scsi_Host *shost) 1661qla2xxx_scan_start(struct Scsi_Host *shost)
1513{ 1662{
1514 scsi_qla_host_t *ha = shost_priv(shost); 1663 scsi_qla_host_t *vha = shost_priv(shost);
1515 1664
1516 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1665 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1666 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1518 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1667 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1519 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1668 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1520} 1669}
1521 1670
1522static int 1671static int
1523qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1672qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1524{ 1673{
1525 scsi_qla_host_t *ha = shost_priv(shost); 1674 scsi_qla_host_t *vha = shost_priv(shost);
1526 1675
1527 if (!ha->host) 1676 if (!vha->host)
1528 return 1; 1677 return 1;
1529 if (time > ha->loop_reset_delay * HZ) 1678 if (time > vha->hw->loop_reset_delay * HZ)
1530 return 1; 1679 return 1;
1531 1680
1532 return atomic_read(&ha->loop_state) == LOOP_READY; 1681 return atomic_read(&vha->loop_state) == LOOP_READY;
1533} 1682}
1534 1683
1535/* 1684/*
@@ -1540,11 +1689,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1540{ 1689{
1541 int ret = -ENODEV; 1690 int ret = -ENODEV;
1542 struct Scsi_Host *host; 1691 struct Scsi_Host *host;
1543 scsi_qla_host_t *ha; 1692 scsi_qla_host_t *base_vha = NULL;
1693 struct qla_hw_data *ha;
1544 char pci_info[30]; 1694 char pci_info[30];
1545 char fw_str[30]; 1695 char fw_str[30];
1546 struct scsi_host_template *sht; 1696 struct scsi_host_template *sht;
1547 int bars, mem_only = 0; 1697 int bars, max_id, mem_only = 0;
1698 uint16_t req_length = 0, rsp_length = 0;
1699 struct req_que *req = NULL;
1700 struct rsp_que *rsp = NULL;
1548 1701
1549 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1702 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1550 sht = &qla2x00_driver_template; 1703 sht = &qla2x00_driver_template;
@@ -1570,33 +1723,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1570 /* This may fail but that's ok */ 1723 /* This may fail but that's ok */
1571 pci_enable_pcie_error_reporting(pdev); 1724 pci_enable_pcie_error_reporting(pdev);
1572 1725
1573 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1726 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1574 if (host == NULL) { 1727 if (!ha) {
1575 printk(KERN_WARNING 1728 DEBUG(printk("Unable to allocate memory for ha\n"));
1576 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1729 goto probe_out;
1577 goto probe_disable_device;
1578 } 1730 }
1731 ha->pdev = pdev;
1579 1732
1580 /* Clear our data area */ 1733 /* Clear our data area */
1581 ha = shost_priv(host);
1582 memset(ha, 0, sizeof(scsi_qla_host_t));
1583
1584 ha->pdev = pdev;
1585 ha->host = host;
1586 ha->host_no = host->host_no;
1587 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
1588 ha->parent = NULL;
1589 ha->bars = bars; 1734 ha->bars = bars;
1590 ha->mem_only = mem_only; 1735 ha->mem_only = mem_only;
1591 spin_lock_init(&ha->hardware_lock); 1736 spin_lock_init(&ha->hardware_lock);
1592 1737
1593 /* Set ISP-type information. */ 1738 /* Set ISP-type information. */
1594 qla2x00_set_isp_flags(ha); 1739 qla2x00_set_isp_flags(ha);
1595
1596 /* Configure PCI I/O space */ 1740 /* Configure PCI I/O space */
1597 ret = qla2x00_iospace_config(ha); 1741 ret = qla2x00_iospace_config(ha);
1598 if (ret) 1742 if (ret)
1599 goto probe_failed; 1743 goto probe_hw_failed;
1600 1744
1601 qla_printk(KERN_INFO, ha, 1745 qla_printk(KERN_INFO, ha,
1602 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1746 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1604,95 +1748,137 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1604 1748
1605 ha->prev_topology = 0; 1749 ha->prev_topology = 0;
1606 ha->init_cb_size = sizeof(init_cb_t); 1750 ha->init_cb_size = sizeof(init_cb_t);
1607 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
1608 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1751 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1609 ha->optrom_size = OPTROM_SIZE_2300; 1752 ha->optrom_size = OPTROM_SIZE_2300;
1610 1753
1611 ha->max_q_depth = MAX_Q_DEPTH;
1612 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1613 ha->max_q_depth = ql2xmaxqdepth;
1614
1615 /* Assign ISP specific operations. */ 1754 /* Assign ISP specific operations. */
1755 max_id = MAX_TARGETS_2200;
1616 if (IS_QLA2100(ha)) { 1756 if (IS_QLA2100(ha)) {
1617 host->max_id = MAX_TARGETS_2100; 1757 max_id = MAX_TARGETS_2100;
1618 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1758 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1619 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1759 req_length = REQUEST_ENTRY_CNT_2100;
1620 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1760 rsp_length = RESPONSE_ENTRY_CNT_2100;
1621 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1761 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1622 host->sg_tablesize = 32;
1623 ha->gid_list_info_size = 4; 1762 ha->gid_list_info_size = 4;
1624 ha->isp_ops = &qla2100_isp_ops; 1763 ha->isp_ops = &qla2100_isp_ops;
1625 } else if (IS_QLA2200(ha)) { 1764 } else if (IS_QLA2200(ha)) {
1626 host->max_id = MAX_TARGETS_2200;
1627 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1765 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1628 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1766 req_length = REQUEST_ENTRY_CNT_2200;
1629 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1767 rsp_length = RESPONSE_ENTRY_CNT_2100;
1630 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1768 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1631 ha->gid_list_info_size = 4; 1769 ha->gid_list_info_size = 4;
1632 ha->isp_ops = &qla2100_isp_ops; 1770 ha->isp_ops = &qla2100_isp_ops;
1633 } else if (IS_QLA23XX(ha)) { 1771 } else if (IS_QLA23XX(ha)) {
1634 host->max_id = MAX_TARGETS_2200;
1635 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1772 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1636 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1773 req_length = REQUEST_ENTRY_CNT_2200;
1637 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1774 rsp_length = RESPONSE_ENTRY_CNT_2300;
1638 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1775 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1639 ha->gid_list_info_size = 6; 1776 ha->gid_list_info_size = 6;
1640 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1777 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1641 ha->optrom_size = OPTROM_SIZE_2322; 1778 ha->optrom_size = OPTROM_SIZE_2322;
1642 ha->isp_ops = &qla2300_isp_ops; 1779 ha->isp_ops = &qla2300_isp_ops;
1643 } else if (IS_QLA24XX_TYPE(ha)) { 1780 } else if (IS_QLA24XX_TYPE(ha)) {
1644 host->max_id = MAX_TARGETS_2200;
1645 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1781 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1646 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1782 req_length = REQUEST_ENTRY_CNT_24XX;
1647 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1783 rsp_length = RESPONSE_ENTRY_CNT_2300;
1648 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1784 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1649 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1785 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1650 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1651 ha->gid_list_info_size = 8; 1786 ha->gid_list_info_size = 8;
1652 ha->optrom_size = OPTROM_SIZE_24XX; 1787 ha->optrom_size = OPTROM_SIZE_24XX;
1788 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1653 ha->isp_ops = &qla24xx_isp_ops; 1789 ha->isp_ops = &qla24xx_isp_ops;
1654 } else if (IS_QLA25XX(ha)) { 1790 } else if (IS_QLA25XX(ha)) {
1655 host->max_id = MAX_TARGETS_2200;
1656 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1791 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1657 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1792 req_length = REQUEST_ENTRY_CNT_24XX;
1658 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1793 rsp_length = RESPONSE_ENTRY_CNT_2300;
1659 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1794 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1660 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1795 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1661 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1662 ha->gid_list_info_size = 8; 1796 ha->gid_list_info_size = 8;
1663 ha->optrom_size = OPTROM_SIZE_25XX; 1797 ha->optrom_size = OPTROM_SIZE_25XX;
1798 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1664 ha->isp_ops = &qla25xx_isp_ops; 1799 ha->isp_ops = &qla25xx_isp_ops;
1665 } 1800 }
1666 host->can_queue = ha->request_q_length + 128;
1667 1801
1668 mutex_init(&ha->vport_lock); 1802 mutex_init(&ha->vport_lock);
1669 init_completion(&ha->mbx_cmd_comp); 1803 init_completion(&ha->mbx_cmd_comp);
1670 complete(&ha->mbx_cmd_comp); 1804 complete(&ha->mbx_cmd_comp);
1671 init_completion(&ha->mbx_intr_comp); 1805 init_completion(&ha->mbx_intr_comp);
1672 1806
1673 INIT_LIST_HEAD(&ha->list);
1674 INIT_LIST_HEAD(&ha->fcports);
1675 INIT_LIST_HEAD(&ha->vp_list);
1676 INIT_LIST_HEAD(&ha->work_list);
1677
1678 set_bit(0, (unsigned long *) ha->vp_idx_map); 1807 set_bit(0, (unsigned long *) ha->vp_idx_map);
1679 1808
1680 qla2x00_config_dma_addressing(ha); 1809 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1681 if (qla2x00_mem_alloc(ha)) { 1810 if (!ret) {
1682 qla_printk(KERN_WARNING, ha, 1811 qla_printk(KERN_WARNING, ha,
1683 "[ERROR] Failed to allocate memory for adapter\n"); 1812 "[ERROR] Failed to allocate memory for adapter\n");
1684 1813
1814 goto probe_hw_failed;
1815 }
1816
1817 req->max_q_depth = MAX_Q_DEPTH;
1818 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1819 req->max_q_depth = ql2xmaxqdepth;
1820
1821
1822 base_vha = qla2x00_create_host(sht, ha);
1823 if (!base_vha) {
1824 qla_printk(KERN_WARNING, ha,
1825 "[ERROR] Failed to allocate memory for scsi_host\n");
1826
1685 ret = -ENOMEM; 1827 ret = -ENOMEM;
1828 goto probe_hw_failed;
1829 }
1830
1831 pci_set_drvdata(pdev, base_vha);
1832
1833 qla2x00_config_dma_addressing(base_vha);
1834
1835 host = base_vha->host;
1836 base_vha->req_ques[0] = req->id;
1837 host->can_queue = req->length + 128;
1838 if (IS_QLA2XXX_MIDTYPE(ha))
1839 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1840 else
1841 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1842 base_vha->vp_idx;
1843 if (IS_QLA2100(ha))
1844 host->sg_tablesize = 32;
1845 host->max_id = max_id;
1846 host->this_id = 255;
1847 host->cmd_per_lun = 3;
1848 host->unique_id = host->host_no;
1849 host->max_cmd_len = MAX_CMDSZ;
1850 host->max_channel = MAX_BUSES - 1;
1851 host->max_lun = MAX_LUNS;
1852 host->transportt = qla2xxx_transport_template;
1853
1854 /* Set up the irqs */
1855 ret = qla2x00_request_irqs(ha, rsp);
1856 if (ret)
1857 goto probe_failed;
1858
1859 /* Alloc arrays of request and response ring ptrs */
1860 if (!qla2x00_alloc_queues(ha)) {
1861 qla_printk(KERN_WARNING, ha,
1862 "[ERROR] Failed to allocate memory for queue"
1863 " pointers\n");
1686 goto probe_failed; 1864 goto probe_failed;
1687 } 1865 }
1866 ha->rsp_q_map[0] = rsp;
1867 ha->req_q_map[0] = req;
1688 1868
1689 if (qla2x00_initialize_adapter(ha)) { 1869 if (ha->mqenable) {
1870 ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
1871 ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
1872 ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
1873 }
1874
1875 if (qla2x00_initialize_adapter(base_vha)) {
1690 qla_printk(KERN_WARNING, ha, 1876 qla_printk(KERN_WARNING, ha,
1691 "Failed to initialize adapter\n"); 1877 "Failed to initialize adapter\n");
1692 1878
1693 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1879 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1694 "Adapter flags %x.\n", 1880 "Adapter flags %x.\n",
1695 ha->host_no, ha->device_flags)); 1881 base_vha->host_no, base_vha->device_flags));
1696 1882
1697 ret = -ENODEV; 1883 ret = -ENODEV;
1698 goto probe_failed; 1884 goto probe_failed;
@@ -1702,7 +1888,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1702 * Startup the kernel thread for this host adapter 1888 * Startup the kernel thread for this host adapter
1703 */ 1889 */
1704 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1890 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
1705 "%s_dpc", ha->host_str); 1891 "%s_dpc", base_vha->host_str);
1706 if (IS_ERR(ha->dpc_thread)) { 1892 if (IS_ERR(ha->dpc_thread)) {
1707 qla_printk(KERN_WARNING, ha, 1893 qla_printk(KERN_WARNING, ha,
1708 "Unable to start DPC thread!\n"); 1894 "Unable to start DPC thread!\n");
@@ -1710,28 +1896,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1710 goto probe_failed; 1896 goto probe_failed;
1711 } 1897 }
1712 1898
1713 host->this_id = 255; 1899 list_add_tail(&base_vha->list, &ha->vp_list);
1714 host->cmd_per_lun = 3; 1900 base_vha->host->irq = ha->pdev->irq;
1715 host->unique_id = host->host_no;
1716 host->max_cmd_len = MAX_CMDSZ;
1717 host->max_channel = MAX_BUSES - 1;
1718 host->max_lun = MAX_LUNS;
1719 host->transportt = qla2xxx_transport_template;
1720
1721 ret = qla2x00_request_irqs(ha);
1722 if (ret)
1723 goto probe_failed;
1724 1901
1725 /* Initialized the timer */ 1902 /* Initialized the timer */
1726 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1903 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1727 1904
1728 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1905 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1729 ha->host_no, ha)); 1906 base_vha->host_no, ha));
1730 1907
1731 pci_set_drvdata(pdev, ha); 1908 base_vha->flags.init_done = 1;
1732 1909 base_vha->flags.online = 1;
1733 ha->flags.init_done = 1;
1734 ha->flags.online = 1;
1735 1910
1736 ret = scsi_add_host(host, &pdev->dev); 1911 ret = scsi_add_host(host, &pdev->dev);
1737 if (ret) 1912 if (ret)
@@ -1741,76 +1916,98 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1741 1916
1742 scsi_scan_host(host); 1917 scsi_scan_host(host);
1743 1918
1744 qla2x00_alloc_sysfs_attr(ha); 1919 qla2x00_alloc_sysfs_attr(base_vha);
1745 1920
1746 qla2x00_init_host_attr(ha); 1921 qla2x00_init_host_attr(base_vha);
1747 1922
1748 qla2x00_dfs_setup(ha); 1923 qla2x00_dfs_setup(base_vha);
1749 1924
1750 qla_printk(KERN_INFO, ha, "\n" 1925 qla_printk(KERN_INFO, ha, "\n"
1751 " QLogic Fibre Channel HBA Driver: %s\n" 1926 " QLogic Fibre Channel HBA Driver: %s\n"
1752 " QLogic %s - %s\n" 1927 " QLogic %s - %s\n"
1753 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1928 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1754 qla2x00_version_str, ha->model_number, 1929 qla2x00_version_str, ha->model_number,
1755 ha->model_desc ? ha->model_desc: "", pdev->device, 1930 ha->model_desc ? ha->model_desc : "", pdev->device,
1756 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1931 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
1757 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1932 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
1758 ha->isp_ops->fw_version_str(ha, fw_str)); 1933 ha->isp_ops->fw_version_str(base_vha, fw_str));
1759 1934
1760 return 0; 1935 return 0;
1761 1936
1762probe_failed: 1937probe_failed:
1763 qla2x00_free_device(ha); 1938 qla2x00_free_que(ha, req, rsp);
1939 qla2x00_free_device(base_vha);
1764 1940
1765 scsi_host_put(host); 1941 scsi_host_put(base_vha->host);
1766 1942
1767probe_disable_device: 1943probe_hw_failed:
1768 pci_disable_device(pdev); 1944 if (ha->iobase)
1945 iounmap(ha->iobase);
1946
1947 pci_release_selected_regions(ha->pdev, ha->bars);
1948 kfree(ha);
1949 ha = NULL;
1769 1950
1770probe_out: 1951probe_out:
1952 pci_disable_device(pdev);
1771 return ret; 1953 return ret;
1772} 1954}
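
Note the reshaped error unwinding in probe: failures before the scsi_host exists take probe_hw_failed (unmap the BARs, release the PCI regions, free the hw structure), while later failures first free the queues and the host through probe_failed. A compact model of the same two-tier goto discipline, with malloc standing in for the real resources:

#include <stdio.h>
#include <stdlib.h>

/* two-tier unwind: resources acquired before the host allocation are
 * released at probe_hw_failed, everything else at probe_failed */
static int probe_one(int fail_late)
{
	void *hw = malloc(64);		/* stands in for qla_hw_data */
	void *host = NULL;		/* stands in for the scsi host */

	if (!hw)
		goto probe_out;

	host = malloc(64);
	if (!host)
		goto probe_hw_failed;

	if (fail_late)			/* e.g. adapter init failed */
		goto probe_failed;

	printf("probe ok\n");
	free(host);
	free(hw);
	return 0;

probe_failed:
	free(host);
probe_hw_failed:
	free(hw);
probe_out:
	return -1;
}

int main(void)
{
	return probe_one(1) ? 0 : 1;
}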
1773 1955
1774static void 1956static void
1775qla2x00_remove_one(struct pci_dev *pdev) 1957qla2x00_remove_one(struct pci_dev *pdev)
1776{ 1958{
1777 scsi_qla_host_t *ha, *vha, *temp; 1959 scsi_qla_host_t *base_vha, *vha, *temp;
1960 struct qla_hw_data *ha;
1961
1962 base_vha = pci_get_drvdata(pdev);
1963 ha = base_vha->hw;
1964
1965 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
1966 if (vha && vha->fc_vport)
1967 fc_vport_terminate(vha->fc_vport);
1968 }
1778 1969
1779 ha = pci_get_drvdata(pdev); 1970 set_bit(UNLOADING, &base_vha->dpc_flags);
1780 1971
1781 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1972 qla2x00_dfs_remove(base_vha);
1782 fc_vport_terminate(vha->fc_vport);
1783 1973
1784 set_bit(UNLOADING, &ha->dpc_flags); 1974 qla84xx_put_chip(base_vha);
1785 1975
1786 qla2x00_dfs_remove(ha); 1976 qla2x00_free_sysfs_attr(base_vha);
1787 1977
1788 qla84xx_put_chip(ha); 1978 fc_remove_host(base_vha->host);
1789 1979
1790 qla2x00_free_sysfs_attr(ha); 1980 scsi_remove_host(base_vha->host);
1791 1981
1792 fc_remove_host(ha->host); 1982 qla2x00_free_device(base_vha);
1793 1983
1794 scsi_remove_host(ha->host); 1984 scsi_host_put(base_vha->host);
1795 1985
1796 qla2x00_free_device(ha); 1986 if (ha->iobase)
1987 iounmap(ha->iobase);
1797 1988
1798 scsi_host_put(ha->host); 1989 if (ha->mqiobase)
1990 iounmap(ha->mqiobase);
1991
1992 pci_release_selected_regions(ha->pdev, ha->bars);
1993 kfree(ha);
1994 ha = NULL;
1799 1995
1800 pci_disable_device(pdev); 1996 pci_disable_device(pdev);
1801 pci_set_drvdata(pdev, NULL); 1997 pci_set_drvdata(pdev, NULL);
1802} 1998}
1803 1999
1804static void 2000static void
1805qla2x00_free_device(scsi_qla_host_t *ha) 2001qla2x00_free_device(scsi_qla_host_t *vha)
1806{ 2002{
1807 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 2003 struct qla_hw_data *ha = vha->hw;
2004 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1808 2005
1809 /* Disable timer */ 2006 /* Disable timer */
1810 if (ha->timer_active) 2007 if (vha->timer_active)
1811 qla2x00_stop_timer(ha); 2008 qla2x00_stop_timer(vha);
1812 2009
1813 ha->flags.online = 0; 2010 vha->flags.online = 0;
1814 2011
1815 /* Kill the kernel thread for this host */ 2012 /* Kill the kernel thread for this host */
1816 if (ha->dpc_thread) { 2013 if (ha->dpc_thread) {
@@ -1825,45 +2022,41 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1825 } 2022 }
1826 2023
1827 if (ha->flags.fce_enabled) 2024 if (ha->flags.fce_enabled)
1828 qla2x00_disable_fce_trace(ha, NULL, NULL); 2025 qla2x00_disable_fce_trace(vha, NULL, NULL);
1829 2026
1830 if (ha->eft) 2027 if (ha->eft)
1831 qla2x00_disable_eft_trace(ha); 2028 qla2x00_disable_eft_trace(vha);
1832 2029
1833 /* Stop currently executing firmware. */ 2030 /* Stop currently executing firmware. */
1834 qla2x00_try_to_stop_firmware(ha); 2031 qla2x00_try_to_stop_firmware(vha);
1835 2032
1836 /* turn-off interrupts on the card */ 2033 /* turn-off interrupts on the card */
1837 if (ha->interrupts_on) 2034 if (ha->interrupts_on)
1838 ha->isp_ops->disable_intrs(ha); 2035 ha->isp_ops->disable_intrs(ha);
1839 2036
1840 qla2x00_mem_free(ha); 2037 qla2x00_free_irqs(vha);
1841 2038
1842 qla2x00_free_irqs(ha); 2039 qla2x00_mem_free(ha);
1843 2040
1844 /* release io space registers */ 2041 qla2x00_free_queues(ha);
1845 if (ha->iobase)
1846 iounmap(ha->iobase);
1847 pci_release_selected_regions(ha->pdev, ha->bars);
1848} 2042}
1849 2043
1850static inline void 2044static inline void
1851qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 2045qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
1852 int defer) 2046 int defer)
1853{ 2047{
1854 struct fc_rport *rport; 2048 struct fc_rport *rport;
1855 scsi_qla_host_t *pha = to_qla_parent(ha);
1856 2049
1857 if (!fcport->rport) 2050 if (!fcport->rport)
1858 return; 2051 return;
1859 2052
1860 rport = fcport->rport; 2053 rport = fcport->rport;
1861 if (defer) { 2054 if (defer) {
1862 spin_lock_irq(ha->host->host_lock); 2055 spin_lock_irq(vha->host->host_lock);
1863 fcport->drport = rport; 2056 fcport->drport = rport;
1864 spin_unlock_irq(ha->host->host_lock); 2057 spin_unlock_irq(vha->host->host_lock);
1865 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 2058 set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
1866 qla2xxx_wake_dpc(pha); 2059 qla2xxx_wake_dpc(vha);
1867 } else 2060 } else
1868 fc_remote_port_delete(rport); 2061 fc_remote_port_delete(rport);
1869} 2062}
@@ -1877,13 +2070,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1877 * 2070 *
1878 * Context: 2071 * Context:
1879 */ 2072 */
1880void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 2073void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
1881 int do_login, int defer) 2074 int do_login, int defer)
1882{ 2075{
1883 if (atomic_read(&fcport->state) == FCS_ONLINE && 2076 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1884 ha->vp_idx == fcport->vp_idx) 2077 vha->vp_idx == fcport->vp_idx) {
1885 qla2x00_schedule_rport_del(ha, fcport, defer); 2078 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1886 2079 qla2x00_schedule_rport_del(vha, fcport, defer);
2080 }
1887 /* 2081 /*
1888 * We may need to retry the login, so don't change the state of the 2082 * We may need to retry the login, so don't change the state of the
1889 * port but do the retries. 2083 * port but do the retries.
@@ -1895,13 +2089,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1895 return; 2089 return;
1896 2090
1897 if (fcport->login_retry == 0) { 2091 if (fcport->login_retry == 0) {
1898 fcport->login_retry = ha->login_retry_count; 2092 fcport->login_retry = vha->hw->login_retry_count;
1899 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 2093 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1900 2094
1901 DEBUG(printk("scsi(%ld): Port login retry: " 2095 DEBUG(printk("scsi(%ld): Port login retry: "
1902 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2096 "%02x%02x%02x%02x%02x%02x%02x%02x, "
1903 "id = 0x%04x retry cnt=%d\n", 2097 "id = 0x%04x retry cnt=%d\n",
1904 ha->host_no, 2098 vha->host_no,
1905 fcport->port_name[0], 2099 fcport->port_name[0],
1906 fcport->port_name[1], 2100 fcport->port_name[1],
1907 fcport->port_name[2], 2101 fcport->port_name[2],
@@ -1929,13 +2123,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1929 * Context: 2123 * Context:
1930 */ 2124 */
1931void 2125void
1932qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 2126qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1933{ 2127{
1934 fc_port_t *fcport; 2128 fc_port_t *fcport;
1935 scsi_qla_host_t *pha = to_qla_parent(ha);
1936 2129
1937 list_for_each_entry(fcport, &pha->fcports, list) { 2130 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1938 if (ha->vp_idx != fcport->vp_idx) 2131 if (vha->vp_idx != fcport->vp_idx)
1939 continue; 2132 continue;
1940 /* 2133 /*
1941 * No point in marking the device as lost, if the device is 2134 * No point in marking the device as lost, if the device is
@@ -1943,9 +2136,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1943 */ 2136 */
1944 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2137 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1945 continue; 2138 continue;
1946 if (atomic_read(&fcport->state) == FCS_ONLINE) 2139 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1947 qla2x00_schedule_rport_del(ha, fcport, defer); 2140 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1948 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2141 qla2x00_schedule_rport_del(vha, fcport, defer);
2142 } else
2143 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1949 } 2144 }
1950} 2145}
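
Both hunks above reorder the lost-device path so the port state flips to FCS_DEVICE_LOST before the rport removal is scheduled, closing the window in which a port still read as online during teardown. A minimal model of that transition; the enum values and field names are assumed for illustration.

#include <stdio.h>

enum fc_state { FCS_ONLINE, FCS_DEVICE_LOST, FCS_DEVICE_DEAD };

struct fcport { enum fc_state state; const char *name; };

/* mark first, then schedule removal, mirroring the reordering above */
static void mark_device_lost(struct fcport *p)
{
	if (p->state == FCS_DEVICE_DEAD)
		return;				/* already being removed */
	if (p->state == FCS_ONLINE) {
		p->state = FCS_DEVICE_LOST;	/* flag before rport teardown */
		printf("%s: scheduling rport delete\n", p->name);
	} else
		p->state = FCS_DEVICE_LOST;
}

int main(void)
{
	struct fcport p = { FCS_ONLINE, "port0" };

	mark_device_lost(&p);
	return p.state == FCS_DEVICE_LOST ? 0 : 1;
}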
1951 2146
@@ -1958,105 +2153,153 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1958* !0 = failure. 2153* !0 = failure.
1959*/ 2154*/
1960static int 2155static int
1961qla2x00_mem_alloc(scsi_qla_host_t *ha) 2156qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2157 struct req_que **req, struct rsp_que **rsp)
1962{ 2158{
1963 char name[16]; 2159 char name[16];
1964 2160
1965 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2161 ha->init_cb_size = sizeof(init_cb_t);
1966 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 2162 if (IS_QLA2XXX_MIDTYPE(ha))
1967 GFP_KERNEL); 2163 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1968 if (!ha->request_ring)
1969 goto fail;
1970
1971 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
1972 (ha->response_q_length + 1) * sizeof(response_t),
1973 &ha->response_dma, GFP_KERNEL);
1974 if (!ha->response_ring)
1975 goto fail_free_request_ring;
1976
1977 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1978 &ha->gid_list_dma, GFP_KERNEL);
1979 if (!ha->gid_list)
1980 goto fail_free_response_ring;
1981 2164
1982 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 2165 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
1983 &ha->init_cb_dma, GFP_KERNEL); 2166 &ha->init_cb_dma, GFP_KERNEL);
1984 if (!ha->init_cb) 2167 if (!ha->init_cb)
1985 goto fail_free_gid_list; 2168 goto fail;
1986 2169
1987 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2170 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1988 ha->host_no); 2171 &ha->gid_list_dma, GFP_KERNEL);
1989 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2172 if (!ha->gid_list)
1990 DMA_POOL_SIZE, 8, 0);
1991 if (!ha->s_dma_pool)
1992 goto fail_free_init_cb; 2173 goto fail_free_init_cb;
1993 2174
1994 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2175 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
1995 if (!ha->srb_mempool) 2176 if (!ha->srb_mempool)
1996 goto fail_free_s_dma_pool; 2177 goto fail_free_gid_list;
1997 2178
1998 /* Get memory for cached NVRAM */ 2179 /* Get memory for cached NVRAM */
1999 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2180 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2000 if (!ha->nvram) 2181 if (!ha->nvram)
2001 goto fail_free_srb_mempool; 2182 goto fail_free_srb_mempool;
2002 2183
2184 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2185 ha->pdev->device);
2186 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2187 DMA_POOL_SIZE, 8, 0);
2188 if (!ha->s_dma_pool)
2189 goto fail_free_nvram;
2190
2003 /* Allocate memory for SNS commands */ 2191 /* Allocate memory for SNS commands */
2004 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2192 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2005 /* Get consistent memory allocated for SNS commands */ 2193 /* Get consistent memory allocated for SNS commands */
2006 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2194 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2007 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2195 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2008 if (!ha->sns_cmd) 2196 if (!ha->sns_cmd)
2009 goto fail_free_nvram; 2197 goto fail_dma_pool;
2010 } else { 2198 } else {
2011 /* Get consistent memory allocated for MS IOCB */ 2199 /* Get consistent memory allocated for MS IOCB */
2012 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2200 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2013 &ha->ms_iocb_dma); 2201 &ha->ms_iocb_dma);
2014 if (!ha->ms_iocb) 2202 if (!ha->ms_iocb)
2015 goto fail_free_nvram; 2203 goto fail_dma_pool;
2016 2204 /* Get consistent memory allocated for CT SNS commands */
2017 /* Get consistent memory allocated for CT SNS commands */
2018 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2205 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2019 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2206 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2020 if (!ha->ct_sns) 2207 if (!ha->ct_sns)
2021 goto fail_free_ms_iocb; 2208 goto fail_free_ms_iocb;
2022 } 2209 }
2023 2210
2024 return 0; 2211 /* Allocate memory for request ring */
2212 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2213 if (!*req) {
2214 DEBUG(printk("Unable to allocate memory for req\n"));
2215 goto fail_req;
2216 }
2217 (*req)->length = req_len;
2218 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2219 ((*req)->length + 1) * sizeof(request_t),
2220 &(*req)->dma, GFP_KERNEL);
2221 if (!(*req)->ring) {
2222 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2223 goto fail_req_ring;
2224 }
2225 /* Allocate memory for response ring */
2226 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2227 if (!*rsp) {
2228 qla_printk(KERN_WARNING, ha,
2229 "Unable to allocate memory for rsp\n");
2230 goto fail_rsp;
2231 }
2232 (*rsp)->hw = ha;
2233 (*rsp)->length = rsp_len;
2234 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2235 ((*rsp)->length + 1) * sizeof(response_t),
2236 &(*rsp)->dma, GFP_KERNEL);
2237 if (!(*rsp)->ring) {
2238 qla_printk(KERN_WARNING, ha,
2239 "Unable to allocate memory for rsp_ring\n");
2240 goto fail_rsp_ring;
2241 }
2242 (*req)->rsp = *rsp;
2243 (*rsp)->req = *req;
2244 /* Allocate memory for NVRAM data for vports */
2245 if (ha->nvram_npiv_size) {
2246 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2247 ha->nvram_npiv_size, GFP_KERNEL);
2248 if (!ha->npiv_info) {
2249 qla_printk(KERN_WARNING, ha,
2250 "Unable to allocate memory for npiv info\n");
2251 goto fail_npiv_info;
2252 }
2253 } else
2254 ha->npiv_info = NULL;
2025 2255
2256 INIT_LIST_HEAD(&ha->vp_list);
2257 return 1;
2258
2259fail_npiv_info:
2260 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2261 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2262 (*rsp)->ring = NULL;
2263 (*rsp)->dma = 0;
2264fail_rsp_ring:
2265 kfree(*rsp);
2266fail_rsp:
2267 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2268 sizeof(request_t), (*req)->ring, (*req)->dma);
2269 (*req)->ring = NULL;
2270 (*req)->dma = 0;
2271fail_req_ring:
2272 kfree(*req);
2273fail_req:
2274 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2275 ha->ct_sns, ha->ct_sns_dma);
2276 ha->ct_sns = NULL;
2277 ha->ct_sns_dma = 0;
2026fail_free_ms_iocb: 2278fail_free_ms_iocb:
2027 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2279 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2028 ha->ms_iocb = NULL; 2280 ha->ms_iocb = NULL;
2029 ha->ms_iocb_dma = 0; 2281 ha->ms_iocb_dma = 0;
2282fail_dma_pool:
2283 dma_pool_destroy(ha->s_dma_pool);
2284 ha->s_dma_pool = NULL;
2030fail_free_nvram: 2285fail_free_nvram:
2031 kfree(ha->nvram); 2286 kfree(ha->nvram);
2032 ha->nvram = NULL; 2287 ha->nvram = NULL;
2033fail_free_srb_mempool: 2288fail_free_srb_mempool:
2034 mempool_destroy(ha->srb_mempool); 2289 mempool_destroy(ha->srb_mempool);
2035 ha->srb_mempool = NULL; 2290 ha->srb_mempool = NULL;
2036fail_free_s_dma_pool:
2037 dma_pool_destroy(ha->s_dma_pool);
2038 ha->s_dma_pool = NULL;
2039fail_free_init_cb:
2040 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2041 ha->init_cb_dma);
2042 ha->init_cb = NULL;
2043 ha->init_cb_dma = 0;
2044fail_free_gid_list: 2291fail_free_gid_list:
2045 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2292 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2046 ha->gid_list_dma); 2293 ha->gid_list_dma);
2047 ha->gid_list = NULL; 2294 ha->gid_list = NULL;
2048 ha->gid_list_dma = 0; 2295 ha->gid_list_dma = 0;
2049fail_free_response_ring: 2296fail_free_init_cb:
2050 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2297 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2051 sizeof(response_t), ha->response_ring, ha->response_dma); 2298 ha->init_cb_dma);
2052 ha->response_ring = NULL; 2299 ha->init_cb = NULL;
2053 ha->response_dma = 0; 2300 ha->init_cb_dma = 0;
2054fail_free_request_ring:
2055 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2056 sizeof(request_t), ha->request_ring, ha->request_dma);
2057 ha->request_ring = NULL;
2058 ha->request_dma = 0;
2059fail: 2301fail:
2302 DEBUG(printk("%s: Memory allocation failure\n", __func__));
2060 return -ENOMEM; 2303 return -ENOMEM;
2061} 2304}
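
The rewritten qla2x00_mem_alloc now reports success as 1 rather than 0 (its caller above tests if (!ret)), and its failure labels release resources in exactly the reverse order of acquisition, so a failure at step N falls through the frees for steps N-1 down to 1. A minimal model of that reverse-order goto unwind, with three mallocs standing in for the DMA buffers and pools:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int mem_alloc(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto fail;
	*b = malloc(16);
	if (!*b)
		goto fail_free_a;
	*c = malloc(16);
	if (!*c)
		goto fail_free_b;
	return 1;		/* note: this helper reports success as 1 */

fail_free_b:
	free(*b);
	*b = NULL;
fail_free_a:
	free(*a);
	*a = NULL;
fail:
	fprintf(stderr, "memory allocation failure\n");
	return -ENOMEM;
}

int main(void)
{
	void *a, *b, *c;
	int ok = mem_alloc(&a, &b, &c) == 1;

	if (ok) {
		free(c);
		free(b);
		free(a);
	}
	return ok ? 0 : 1;
}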
2062 2305
@@ -2068,32 +2311,29 @@ fail:
2068* ha = adapter block pointer. 2311* ha = adapter block pointer.
2069*/ 2312*/
2070static void 2313static void
2071qla2x00_mem_free(scsi_qla_host_t *ha) 2314qla2x00_mem_free(struct qla_hw_data *ha)
2072{ 2315{
2073 struct list_head *fcpl, *fcptemp;
2074 fc_port_t *fcport;
2075
2076 if (ha->srb_mempool) 2316 if (ha->srb_mempool)
2077 mempool_destroy(ha->srb_mempool); 2317 mempool_destroy(ha->srb_mempool);
2078 2318
2079 if (ha->fce) 2319 if (ha->fce)
2080 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2320 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2081 ha->fce_dma); 2321 ha->fce_dma);
2082 2322
2083 if (ha->fw_dump) { 2323 if (ha->fw_dump) {
2084 if (ha->eft) 2324 if (ha->eft)
2085 dma_free_coherent(&ha->pdev->dev, 2325 dma_free_coherent(&ha->pdev->dev,
2086 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2326 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2087 vfree(ha->fw_dump); 2327 vfree(ha->fw_dump);
2088 } 2328 }
2089 2329
2090 if (ha->sns_cmd) 2330 if (ha->sns_cmd)
2091 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2331 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2092 ha->sns_cmd, ha->sns_cmd_dma); 2332 ha->sns_cmd, ha->sns_cmd_dma);
2093 2333
2094 if (ha->ct_sns) 2334 if (ha->ct_sns)
2095 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2335 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2096 ha->ct_sns, ha->ct_sns_dma); 2336 ha->ct_sns, ha->ct_sns_dma);
2097 2337
2098 if (ha->sfp_data) 2338 if (ha->sfp_data)
2099 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 2339 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2104,23 +2344,18 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2104 if (ha->s_dma_pool) 2344 if (ha->s_dma_pool)
2105 dma_pool_destroy(ha->s_dma_pool); 2345 dma_pool_destroy(ha->s_dma_pool);
2106 2346
2107 if (ha->init_cb)
2108 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2109 ha->init_cb, ha->init_cb_dma);
2110 2347
2111 if (ha->gid_list) 2348 if (ha->gid_list)
2112 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2349 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2113 ha->gid_list_dma); 2350 ha->gid_list_dma);
2114 2351
2115 if (ha->response_ring)
2116 dma_free_coherent(&ha->pdev->dev,
2117 (ha->response_q_length + 1) * sizeof(response_t),
2118 ha->response_ring, ha->response_dma);
2119 2352
2120 if (ha->request_ring) 2353 if (ha->init_cb)
2121 dma_free_coherent(&ha->pdev->dev, 2354 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2122 (ha->request_q_length + 1) * sizeof(request_t), 2355 ha->init_cb, ha->init_cb_dma);
2123 ha->request_ring, ha->request_dma); 2356 vfree(ha->optrom_buffer);
2357 kfree(ha->nvram);
2358 kfree(ha->npiv_info);
2124 2359
2125 ha->srb_mempool = NULL; 2360 ha->srb_mempool = NULL;
2126 ha->eft = NULL; 2361 ha->eft = NULL;
@@ -2139,30 +2374,45 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2139 ha->gid_list = NULL; 2374 ha->gid_list = NULL;
2140 ha->gid_list_dma = 0; 2375 ha->gid_list_dma = 0;
2141 2376
2142 ha->response_ring = NULL; 2377 ha->fw_dump = NULL;
2143 ha->response_dma = 0; 2378 ha->fw_dumped = 0;
2144 ha->request_ring = NULL; 2379 ha->fw_dump_reading = 0;
2145 ha->request_dma = 0; 2380}
2146 2381
2147 list_for_each_safe(fcpl, fcptemp, &ha->fcports) { 2382struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2148 fcport = list_entry(fcpl, fc_port_t, list); 2383 struct qla_hw_data *ha)
2384{
2385 struct Scsi_Host *host;
2386 struct scsi_qla_host *vha = NULL;
2149 2387
2150 /* fc ports */ 2388 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2151 list_del_init(&fcport->list); 2389 if (host == NULL) {
2152 kfree(fcport); 2390 printk(KERN_WARNING
2391 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2392 goto fail;
2153 } 2393 }
2154 INIT_LIST_HEAD(&ha->fcports);
2155 2394
2156 ha->fw_dump = NULL; 2395 /* Clear our data area */
2157 ha->fw_dumped = 0; 2396 vha = shost_priv(host);
2158 ha->fw_dump_reading = 0; 2397 memset(vha, 0, sizeof(scsi_qla_host_t));
2159 2398
2160 vfree(ha->optrom_buffer); 2399 vha->host = host;
2161 kfree(ha->nvram); 2400 vha->host_no = host->host_no;
2401 vha->hw = ha;
2402
2403 INIT_LIST_HEAD(&vha->vp_fcports);
2404 INIT_LIST_HEAD(&vha->work_list);
2405 INIT_LIST_HEAD(&vha->list);
2406
2407 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2408 return vha;
2409
2410fail:
2411 return vha;
2162} 2412}
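
qla2x00_create_host wraps scsi_host_alloc, treating the Scsi_Host's driver-private area as the scsi_qla_host_t and wiring it back to both the host and the shared qla_hw_data. A toy of the header-plus-private-area pattern it relies on; host_alloc and host_priv below merely approximate scsi_host_alloc and shost_priv.

#include <stdio.h>
#include <stdlib.h>

struct host { long host_no; /* driver-private data follows */ };

/* mimic scsi_host_alloc(sht, privsize) */
static struct host *host_alloc(size_t privsize, long no)
{
	struct host *h = calloc(1, sizeof(*h) + privsize);

	if (h)
		h->host_no = no;
	return h;
}

/* mimic shost_priv(): the private area sits right after the header */
static void *host_priv(struct host *h) { return h + 1; }

struct vha { struct host *host; long host_no; char host_str[32]; };

static struct vha *create_host(long no)
{
	struct host *h = host_alloc(sizeof(struct vha), no);
	struct vha *vha;

	if (!h)
		return NULL;
	vha = host_priv(h);
	vha->host = h;
	vha->host_no = h->host_no;
	snprintf(vha->host_str, sizeof(vha->host_str), "qla2xxx_%ld", no);
	return vha;
}

int main(void)
{
	struct vha *vha = create_host(7);

	if (vha) {
		printf("%s\n", vha->host_str);
		free(vha->host);	/* vha lives inside the host allocation */
	}
	return 0;
}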
2163 2413
2164static struct qla_work_evt * 2414static struct qla_work_evt *
2165qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, 2415qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2166 int locked) 2416 int locked)
2167{ 2417{
2168 struct qla_work_evt *e; 2418 struct qla_work_evt *e;
@@ -2179,42 +2429,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2179} 2429}
2180 2430
2181static int 2431static int
2182qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2432qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
2183{ 2433{
2184 unsigned long uninitialized_var(flags); 2434 unsigned long uninitialized_var(flags);
2185 scsi_qla_host_t *pha = to_qla_parent(ha); 2435 struct qla_hw_data *ha = vha->hw;
2186 2436
2187 if (!locked) 2437 if (!locked)
2188 spin_lock_irqsave(&pha->hardware_lock, flags); 2438 spin_lock_irqsave(&ha->hardware_lock, flags);
2189 list_add_tail(&e->list, &ha->work_list); 2439 list_add_tail(&e->list, &vha->work_list);
2190 qla2xxx_wake_dpc(ha); 2440 qla2xxx_wake_dpc(vha);
2191 if (!locked) 2441 if (!locked)
2192 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2442 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2193 return QLA_SUCCESS; 2443 return QLA_SUCCESS;
2194} 2444}
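
qla2x00_post_work now takes the shared hardware_lock from vha->hw unless the caller already holds it, appends the event to the per-vport work_list, and wakes the DPC thread. A userspace approximation using a pthread mutex (compile with -pthread); the wake-up is left as a comment, and the list push is simplified from the driver's tail append.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

struct evt { struct evt *next; int type; };
static struct evt *work_list;

/* `locked` means the caller already holds hw_lock (e.g. an ISR path) */
static int post_work(struct evt *e, int locked)
{
	if (!locked)
		pthread_mutex_lock(&hw_lock);
	e->next = work_list;
	work_list = e;
	/* qla2xxx_wake_dpc() would signal the worker thread here */
	if (!locked)
		pthread_mutex_unlock(&hw_lock);
	return 0;
}

int main(void)
{
	struct evt aen = { .type = 1 };

	post_work(&aen, 0);
	printf("queued type %d\n", work_list->type);
	return 0;
}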
2195 2445
2196int 2446int
2197qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code, 2447qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2198 u32 data) 2448 u32 data)
2199{ 2449{
2200 struct qla_work_evt *e; 2450 struct qla_work_evt *e;
2201 2451
2202 e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1); 2452 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
2203 if (!e) 2453 if (!e)
2204 return QLA_FUNCTION_FAILED; 2454 return QLA_FUNCTION_FAILED;
2205 2455
2206 e->u.aen.code = code; 2456 e->u.aen.code = code;
2207 e->u.aen.data = data; 2457 e->u.aen.data = data;
2208 return qla2x00_post_work(ha, e, 1); 2458 return qla2x00_post_work(vha, e, 1);
2209} 2459}
2210 2460
2211int 2461int
2212qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1, 2462qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2213 uint16_t d2, uint16_t d3) 2463 uint16_t d2, uint16_t d3)
2214 { 2464 {
2215 struct qla_work_evt *e; 2465 struct qla_work_evt *e;
2216 2466
2217 e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1); 2467 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2218 if (!e) 2468 if (!e)
2219 return QLA_FUNCTION_FAILED; 2469 return QLA_FUNCTION_FAILED;
2220 2470
@@ -2222,36 +2472,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
2222 e->u.hwe.d1 = d1; 2472 e->u.hwe.d1 = d1;
2223 e->u.hwe.d2 = d2; 2473 e->u.hwe.d2 = d2;
2224 e->u.hwe.d3 = d3; 2474 e->u.hwe.d3 = d3;
2225 return qla2x00_post_work(ha, e, 1); 2475 return qla2x00_post_work(vha, e, 1);
2226 } 2476 }
2227 2477
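qla2x00_post_aen_work() and qla2x00_post_hwe_work() above both follow the same three-step pattern over these primitives: allocate a typed event with qla2x00_alloc_work(), fill the matching union member, then queue it with qla2x00_post_work() so the DPC thread dispatches it from the per-port work list. A hedged sketch of a further event type under that pattern (QLA_EVT_EXAMPLE and u.example are invented names that do not exist in the driver):

/* Hypothetical poster following the alloc/fill/post pattern above.
 * QLA_EVT_EXAMPLE and e->u.example.cookie are invented names. */
int
qla2x00_post_example_work(struct scsi_qla_host *vha, uint32_t cookie)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_EXAMPLE, 1);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.example.cookie = cookie;
	return qla2x00_post_work(vha, e, 1);	/* 1: hardware_lock already held */
}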
2228 static void 2478 static void
2229 qla2x00_do_work(struct scsi_qla_host *ha) 2479 qla2x00_do_work(struct scsi_qla_host *vha)
2230 { 2480 {
2231 struct qla_work_evt *e; 2481 struct qla_work_evt *e;
2232 scsi_qla_host_t *pha = to_qla_parent(ha); 2482 struct qla_hw_data *ha = vha->hw;
2233 2483
2234 spin_lock_irq(&pha->hardware_lock); 2484 spin_lock_irq(&ha->hardware_lock);
2235 while (!list_empty(&ha->work_list)) { 2485 while (!list_empty(&vha->work_list)) {
2236 e = list_entry(ha->work_list.next, struct qla_work_evt, list); 2486 e = list_entry(vha->work_list.next, struct qla_work_evt, list);
2237 list_del_init(&e->list); 2487 list_del_init(&e->list);
2238 spin_unlock_irq(&pha->hardware_lock); 2488 spin_unlock_irq(&ha->hardware_lock);
2239 2489
2240 switch (e->type) { 2490 switch (e->type) {
2241 case QLA_EVT_AEN: 2491 case QLA_EVT_AEN:
2242 fc_host_post_event(ha->host, fc_get_event_number(), 2492 fc_host_post_event(vha->host, fc_get_event_number(),
2243 e->u.aen.code, e->u.aen.data); 2493 e->u.aen.code, e->u.aen.data);
2244 break; 2494 break;
2245 case QLA_EVT_HWE_LOG: 2495 case QLA_EVT_HWE_LOG:
2246 qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1, 2496 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2247 e->u.hwe.d2, e->u.hwe.d3); 2497 e->u.hwe.d2, e->u.hwe.d3);
2248 break; 2498 break;
2249 } 2499 }
2250 if (e->flags & QLA_EVT_FLAG_FREE) 2500 if (e->flags & QLA_EVT_FLAG_FREE)
2251 kfree(e); 2501 kfree(e);
2252 spin_lock_irq(&pha->hardware_lock); 2502 spin_lock_irq(&ha->hardware_lock);
2503 }
2504 spin_unlock_irq(&ha->hardware_lock);
2505 }
2506 /* Relogin all the fcports of a vport
2507 * Context: dpc thread
2508 */
2509 void qla2x00_relogin(struct scsi_qla_host *vha)
2510 {
2511 fc_port_t *fcport;
2512 uint8_t status;
2513 uint16_t next_loopid = 0;
2514 struct qla_hw_data *ha = vha->hw;
2515
2516 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2517 /*
2518 * If the port is not ONLINE then try to login
2519 * to it if we haven't run out of retries.
2520 */
2521 if (atomic_read(&fcport->state) !=
2522 FCS_ONLINE && fcport->login_retry) {
2523
2524 if (fcport->flags & FCF_FABRIC_DEVICE) {
2525 if (fcport->flags & FCF_TAPE_PRESENT)
2526 ha->isp_ops->fabric_logout(vha,
2527 fcport->loop_id,
2528 fcport->d_id.b.domain,
2529 fcport->d_id.b.area,
2530 fcport->d_id.b.al_pa);
2531
2532 status = qla2x00_fabric_login(vha, fcport,
2533 &next_loopid);
2534 } else
2535 status = qla2x00_local_device_login(vha,
2536 fcport);
2537
2538 fcport->login_retry--;
2539 if (status == QLA_SUCCESS) {
2540 fcport->old_loop_id = fcport->loop_id;
2541
2542 DEBUG(printk("scsi(%ld): port login OK: logged "
2543 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2544
2545 qla2x00_update_fcport(vha, fcport);
2546
2547 } else if (status == 1) {
2548 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2549 /* retry the login again */
2550 DEBUG(printk("scsi(%ld): Retrying"
2551 " %d login again loop_id 0x%x\n",
2552 vha->host_no, fcport->login_retry,
2553 fcport->loop_id));
2554 } else {
2555 fcport->login_retry = 0;
2556 }
2557
2558 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2559 fcport->loop_id = FC_NO_LOOP_ID;
2560 }
2561 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2562 break;
2253 } 2563 }
2254 spin_unlock_irq(&pha->hardware_lock);
2255 } 2564 }
2256 2565
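The relogin loop above distinguishes three outcomes per fcport: a successful login updates the port, status 1 re-arms RELOGIN_NEEDED for another pass, and anything else stops retrying; once the retries are spent the port loses its loop ID. Restated as a standalone predicate (a hypothetical helper mirroring the branches above, not driver code):

/* Hypothetical restatement of the per-port outcome handling above. */
enum relogin_next { RELOGIN_DONE, RELOGIN_RETRY, RELOGIN_GIVE_UP };

static enum relogin_next example_relogin_next(uint8_t status, int retries_left)
{
	if (status == QLA_SUCCESS)
		return RELOGIN_DONE;	/* qla2x00_update_fcport() */
	if (status == 1 && retries_left)
		return RELOGIN_RETRY;	/* RELOGIN_NEEDED set again */
	return RELOGIN_GIVE_UP;		/* loop_id = FC_NO_LOOP_ID */
}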
2257/************************************************************************** 2566/**************************************************************************
@@ -2271,15 +2580,11 @@ static int
2271qla2x00_do_dpc(void *data) 2580qla2x00_do_dpc(void *data)
2272{ 2581{
2273 int rval; 2582 int rval;
2274 scsi_qla_host_t *ha; 2583 scsi_qla_host_t *base_vha;
2275 fc_port_t *fcport; 2584 struct qla_hw_data *ha;
2276 uint8_t status;
2277 uint16_t next_loopid;
2278 struct scsi_qla_host *vha;
2279 int i;
2280 2585
2281 2586 ha = (struct qla_hw_data *)data;
2282 ha = (scsi_qla_host_t *)data; 2587 base_vha = pci_get_drvdata(ha->pdev);
2283 2588
2284 set_user_nice(current, -20); 2589 set_user_nice(current, -20);
2285 2590
@@ -2293,10 +2598,10 @@ qla2x00_do_dpc(void *data)
2293 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 2598 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2294 2599
2295 /* Initialization not yet finished. Don't do anything yet. */ 2600 /* Initialization not yet finished. Don't do anything yet. */
2296 if (!ha->flags.init_done) 2601 if (!base_vha->flags.init_done)
2297 continue; 2602 continue;
2298 2603
2299 DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no)); 2604 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2300 2605
2301 ha->dpc_active = 1; 2606 ha->dpc_active = 1;
2302 2607
@@ -2305,149 +2610,98 @@ qla2x00_do_dpc(void *data)
2305 continue; 2610 continue;
2306 } 2611 }
2307 2612
2308 qla2x00_do_work(ha); 2613 qla2x00_do_work(base_vha);
2309 2614
2310 if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 2615 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2616 &base_vha->dpc_flags)) {
2311 2617
2312 DEBUG(printk("scsi(%ld): dpc: sched " 2618 DEBUG(printk("scsi(%ld): dpc: sched "
2313 "qla2x00_abort_isp ha = %p\n", 2619 "qla2x00_abort_isp ha = %p\n",
2314 ha->host_no, ha)); 2620 base_vha->host_no, ha));
2315 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 2621 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2316 &ha->dpc_flags))) { 2622 &base_vha->dpc_flags))) {
2317 2623
2318 if (qla2x00_abort_isp(ha)) { 2624 if (qla2x00_abort_isp(base_vha)) {
2319 /* failed. retry later */ 2625 /* failed. retry later */
2320 set_bit(ISP_ABORT_NEEDED, 2626 set_bit(ISP_ABORT_NEEDED,
2321 &ha->dpc_flags); 2627 &base_vha->dpc_flags);
2322 }
2323 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2324 }
2325
2326 for_each_mapped_vp_idx(ha, i) {
2327 list_for_each_entry(vha, &ha->vp_list,
2328 vp_list) {
2329 if (i == vha->vp_idx) {
2330 set_bit(ISP_ABORT_NEEDED,
2331 &vha->dpc_flags);
2332 break;
2333 }
2334 } 2628 }
2629 clear_bit(ABORT_ISP_ACTIVE,
2630 &base_vha->dpc_flags);
2335 } 2631 }
2336 2632
2337 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2633 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2338 ha->host_no)); 2634 base_vha->host_no));
2339 } 2635 }
2340 2636
2341 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) { 2637 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2342 qla2x00_update_fcports(ha); 2638 qla2x00_update_fcports(base_vha);
2343 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 2639 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2344 } 2640 }
2345 2641
2346 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2642 if (test_and_clear_bit(RESET_MARKER_NEEDED,
2347 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2643 &base_vha->dpc_flags) &&
2644 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2348 2645
2349 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 2646 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2350 ha->host_no)); 2647 base_vha->host_no));
2351 2648
2352 qla2x00_rst_aen(ha); 2649 qla2x00_rst_aen(base_vha);
2353 clear_bit(RESET_ACTIVE, &ha->dpc_flags); 2650 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2354 } 2651 }
2355 2652
2356 /* Retry each device up to login retry count */ 2653 /* Retry each device up to login retry count */
2357 if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2654 if ((test_and_clear_bit(RELOGIN_NEEDED,
2358 !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) && 2655 &base_vha->dpc_flags)) &&
2359 atomic_read(&ha->loop_state) != LOOP_DOWN) { 2656 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2657 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2360 2658
2361 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 2659 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2362 ha->host_no)); 2660 base_vha->host_no));
2363 2661 qla2x00_relogin(base_vha);
2364 next_loopid = 0; 2662
2365 list_for_each_entry(fcport, &ha->fcports, list) {
2366 /*
2367 * If the port is not ONLINE then try to login
2368 * to it if we haven't run out of retries.
2369 */
2370 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2371 fcport->login_retry) {
2372
2373 if (fcport->flags & FCF_FABRIC_DEVICE) {
2374 if (fcport->flags &
2375 FCF_TAPE_PRESENT)
2376 ha->isp_ops->fabric_logout(
2377 ha, fcport->loop_id,
2378 fcport->d_id.b.domain,
2379 fcport->d_id.b.area,
2380 fcport->d_id.b.al_pa);
2381 status = qla2x00_fabric_login(
2382 ha, fcport, &next_loopid);
2383 } else
2384 status =
2385 qla2x00_local_device_login(
2386 ha, fcport);
2387
2388 fcport->login_retry--;
2389 if (status == QLA_SUCCESS) {
2390 fcport->old_loop_id = fcport->loop_id;
2391
2392 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
2393 ha->host_no, fcport->loop_id));
2394
2395 qla2x00_update_fcport(ha,
2396 fcport);
2397 } else if (status == 1) {
2398 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
2399 /* retry the login again */
2400 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
2401 ha->host_no,
2402 fcport->login_retry, fcport->loop_id));
2403 } else {
2404 fcport->login_retry = 0;
2405 }
2406 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2407 fcport->loop_id = FC_NO_LOOP_ID;
2408 }
2409 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2410 break;
2411 }
2412 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2663 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2413 ha->host_no)); 2664 base_vha->host_no));
2414 } 2665 }
2415 2666
2416 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2667 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2668 &base_vha->dpc_flags)) {
2417 2669
2418 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2670 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2419 ha->host_no)); 2671 base_vha->host_no));
2420 2672
2421 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2673 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2422 &ha->dpc_flags))) { 2674 &base_vha->dpc_flags))) {
2423 2675
2424 rval = qla2x00_loop_resync(ha); 2676 rval = qla2x00_loop_resync(base_vha);
2425 2677
2426 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2678 clear_bit(LOOP_RESYNC_ACTIVE,
2679 &base_vha->dpc_flags);
2427 } 2680 }
2428 2681
2429 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2682 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2430 ha->host_no)); 2683 base_vha->host_no));
2431 } 2684 }
2432 2685
2433 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2686 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2434 atomic_read(&ha->loop_state) == LOOP_READY) { 2687 atomic_read(&base_vha->loop_state) == LOOP_READY) {
2435 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2688 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2436 qla2xxx_flash_npiv_conf(ha); 2689 qla2xxx_flash_npiv_conf(base_vha);
2437 } 2690 }
2438 2691
2439 if (!ha->interrupts_on) 2692 if (!ha->interrupts_on)
2440 ha->isp_ops->enable_intrs(ha); 2693 ha->isp_ops->enable_intrs(ha);
2441 2694
2442 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2695 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2443 ha->isp_ops->beacon_blink(ha); 2696 &base_vha->dpc_flags))
2697 ha->isp_ops->beacon_blink(base_vha);
2444 2698
2445 qla2x00_do_dpc_all_vps(ha); 2699 qla2x00_do_dpc_all_vps(base_vha);
2446 2700
2447 ha->dpc_active = 0; 2701 ha->dpc_active = 0;
2448 } /* End of while(1) */ 2702 } /* End of while(1) */
2449 2703
2450 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2704 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2451 2705
2452 /* 2706 /*
2453 * Make sure that nobody tries to wake us up again. 2707 * Make sure that nobody tries to wake us up again.
@@ -2458,11 +2712,12 @@ qla2x00_do_dpc(void *data)
2458 } 2712 }
2459 2713
2460 void 2714 void
2461 qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2715 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
2462 { 2716 {
2717 struct qla_hw_data *ha = vha->hw;
2463 struct task_struct *t = ha->dpc_thread; 2718 struct task_struct *t = ha->dpc_thread;
2464 2719
2465 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2720 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
2466 wake_up_process(t); 2721 wake_up_process(t);
2467 } 2722 }
2468 2723
@@ -2474,26 +2729,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
2474* ha = adapter block pointer. 2729* ha = adapter block pointer.
2475*/ 2730*/
2476 static void 2731 static void
2477 qla2x00_rst_aen(scsi_qla_host_t *ha) 2732 qla2x00_rst_aen(scsi_qla_host_t *vha)
2478 { 2733 {
2479 if (ha->flags.online && !ha->flags.reset_active && 2734 if (vha->flags.online && !vha->flags.reset_active &&
2480 !atomic_read(&ha->loop_down_timer) && 2735 !atomic_read(&vha->loop_down_timer) &&
2481 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2736 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
2482 do { 2737 do {
2483 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2738 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2484 2739
2485 /* 2740 /*
2486 * Issue marker command only when we are going to start 2741 * Issue marker command only when we are going to start
2487 * the I/O. 2742 * the I/O.
2488 */ 2743 */
2489 ha->marker_needed = 1; 2744 vha->marker_needed = 1;
2490 } while (!atomic_read(&ha->loop_down_timer) && 2745 } while (!atomic_read(&vha->loop_down_timer) &&
2491 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2746 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
2492 } 2747 }
2493 } 2748 }
2494 2749
2495 static void 2750 static void
2496 qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2751 qla2x00_sp_free_dma(srb_t *sp)
2497 { 2752 {
2498 struct scsi_cmnd *cmd = sp->cmd; 2753 struct scsi_cmnd *cmd = sp->cmd;
2499 2754
@@ -2505,11 +2760,11 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2505 } 2760 }
2506 2761
2507 void 2762 void
2508 qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2763 qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
2509 { 2764 {
2510 struct scsi_cmnd *cmd = sp->cmd; 2765 struct scsi_cmnd *cmd = sp->cmd;
2511 2766
2512 qla2x00_sp_free_dma(ha, sp); 2767 qla2x00_sp_free_dma(sp);
2513 2768
2514 mempool_free(sp, ha->srb_mempool); 2769 mempool_free(sp, ha->srb_mempool);
2515 2770
@@ -2525,7 +2780,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2525* Context: Interrupt 2780* Context: Interrupt
2526***************************************************************************/ 2781***************************************************************************/
2527 void 2782 void
2528 qla2x00_timer(scsi_qla_host_t *ha) 2783 qla2x00_timer(scsi_qla_host_t *vha)
2529 { 2784 {
2530 unsigned long cpu_flags = 0; 2785 unsigned long cpu_flags = 0;
2531 fc_port_t *fcport; 2786 fc_port_t *fcport;
@@ -2533,8 +2788,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
2533 int index; 2788 int index;
2534 srb_t *sp; 2789 srb_t *sp;
2535 int t; 2790 int t;
2536 scsi_qla_host_t *pha = to_qla_parent(ha); 2791 struct qla_hw_data *ha = vha->hw;
2537 2792 struct req_que *req;
2538 /* 2793 /*
2539 * Ports - Port down timer. 2794 * Ports - Port down timer.
2540 * 2795 *
@@ -2543,7 +2798,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2543 * the port is marked DEAD. 2798
2544 */ 2799 */
2545 t = 0; 2800 t = 0;
2546 list_for_each_entry(fcport, &ha->fcports, list) { 2801 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2547 if (fcport->port_type != FCT_TARGET) 2802 if (fcport->port_type != FCT_TARGET)
2548 continue; 2803 continue;
2549 2804
@@ -2557,7 +2812,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2557 2812
2558 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2813 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
2559 "%d remaining\n", 2814 "%d remaining\n",
2560 ha->host_no, 2815 vha->host_no,
2561 t, atomic_read(&fcport->port_down_timer))); 2816 t, atomic_read(&fcport->port_down_timer)));
2562 } 2817 }
2563 t++; 2818 t++;
@@ -2565,30 +2820,32 @@ qla2x00_timer(scsi_qla_host_t *ha)
2565 2820
2566 2821
2567 /* Loop down handler. */ 2822 /* Loop down handler. */
2568 if (atomic_read(&ha->loop_down_timer) > 0 && 2823 if (atomic_read(&vha->loop_down_timer) > 0 &&
2569 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2824 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
2825 && vha->flags.online) {
2570 2826
2571 if (atomic_read(&ha->loop_down_timer) == 2827 if (atomic_read(&vha->loop_down_timer) ==
2572 ha->loop_down_abort_time) { 2828 vha->loop_down_abort_time) {
2573 2829
2574 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2830 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
2575 "queues before time expire\n", 2831 "queues before time expire\n",
2576 ha->host_no)); 2832 vha->host_no));
2577 2833
2578 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2834 if (!IS_QLA2100(ha) && vha->link_down_timeout)
2579 atomic_set(&ha->loop_state, LOOP_DEAD); 2835 atomic_set(&vha->loop_state, LOOP_DEAD);
2580 2836
2581 /* Schedule an ISP abort to return any tape commands. */ 2837 /* Schedule an ISP abort to return any tape commands. */
2582 /* NPIV - scan physical port only */ 2838 /* NPIV - scan physical port only */
2583 if (!ha->parent) { 2839 if (!vha->vp_idx) {
2584 spin_lock_irqsave(&ha->hardware_lock, 2840 spin_lock_irqsave(&ha->hardware_lock,
2585 cpu_flags); 2841 cpu_flags);
2842 req = ha->req_q_map[0];
2586 for (index = 1; 2843 for (index = 1;
2587 index < MAX_OUTSTANDING_COMMANDS; 2844 index < MAX_OUTSTANDING_COMMANDS;
2588 index++) { 2845 index++) {
2589 fc_port_t *sfcp; 2846 fc_port_t *sfcp;
2590 2847
2591 sp = ha->outstanding_cmds[index]; 2848 sp = req->outstanding_cmds[index];
2592 if (!sp) 2849 if (!sp)
2593 continue; 2850 continue;
2594 sfcp = sp->fcport; 2851 sfcp = sp->fcport;
@@ -2596,63 +2853,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
2596 continue; 2853 continue;
2597 2854
2598 set_bit(ISP_ABORT_NEEDED, 2855 set_bit(ISP_ABORT_NEEDED,
2599 &ha->dpc_flags); 2856 &vha->dpc_flags);
2600 break; 2857 break;
2601 } 2858 }
2602 spin_unlock_irqrestore(&ha->hardware_lock, 2859 spin_unlock_irqrestore(&ha->hardware_lock,
2603 cpu_flags); 2860 cpu_flags);
2604 } 2861 }
2605 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2862 set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
2606 start_dpc++; 2863 start_dpc++;
2607 } 2864 }
2608 2865
2609 /* if the loop has been down for 4 minutes, reinit adapter */ 2866 /* if the loop has been down for 4 minutes, reinit adapter */
2610 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) { 2867 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
2611 DEBUG(printk("scsi(%ld): Loop down exceeded 4 mins - " 2868
2612 "restarting queues.\n", 2869 "restarting queues.\n",
2613 ha->host_no)); 2870 vha->host_no));
2614 2871
2615 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); 2872 set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
2616 start_dpc++; 2873 start_dpc++;
2617 2874
2618 if (!(ha->device_flags & DFLG_NO_CABLE) && 2875 if (!(vha->device_flags & DFLG_NO_CABLE) &&
2619 !ha->parent) { 2876 !vha->vp_idx) {
2620 DEBUG(printk("scsi(%ld): Loop down - " 2877 DEBUG(printk("scsi(%ld): Loop down - "
2621 "aborting ISP.\n", 2878 "aborting ISP.\n",
2622 ha->host_no)); 2879 vha->host_no));
2623 qla_printk(KERN_WARNING, ha, 2880 qla_printk(KERN_WARNING, ha,
2624 "Loop down - aborting ISP.\n"); 2881 "Loop down - aborting ISP.\n");
2625 2882
2626 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2883 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2627 } 2884 }
2628 } 2885 }
2629 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 2886 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
2630 ha->host_no, 2887 vha->host_no,
2631 atomic_read(&ha->loop_down_timer))); 2888 atomic_read(&vha->loop_down_timer)));
2632 } 2889 }
2633 2890
2634 /* Check if beacon LED needs to be blinked */ 2891 /* Check if beacon LED needs to be blinked */
2635 if (ha->beacon_blink_led == 1) { 2892 if (ha->beacon_blink_led == 1) {
2636 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2893 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
2637 start_dpc++; 2894 start_dpc++;
2638 } 2895 }
2639 2896
2640 /* Process any deferred work. */ 2897 /* Process any deferred work. */
2641 if (!list_empty(&ha->work_list)) 2898 if (!list_empty(&vha->work_list))
2642 start_dpc++; 2899 start_dpc++;
2643 2900
2644 /* Schedule the DPC routine if needed */ 2901 /* Schedule the DPC routine if needed */
2645 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2902 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2646 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2903 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
2647 test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || 2904 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
2648 start_dpc || 2905 start_dpc ||
2649 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2906 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
2650 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2907 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
2651 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || 2908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
2652 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
2653 qla2xxx_wake_dpc(pha); 2910 qla2xxx_wake_dpc(vha);
2654 2911
2655 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2912 qla2x00_restart_timer(vha, WATCH_INTERVAL);
2656 } 2913 }
2657 2914
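qla2x00_timer() runs once per tick and re-arms itself through qla2x00_restart_timer(vha, WATCH_INTERVAL) at the end, so every check above repeats each interval and the DPC thread is woken only when a flag or start_dpc demands it. A sketch of that self-rearming pattern with the timer_list API of this kernel era (the callback body is illustrative; the driver's equivalent re-arm lives in qla2x00_restart_timer()):

/* Sketch of the self-rearming watchdog pattern used above. */
static void example_watchdog(unsigned long data)
{
	scsi_qla_host_t *vha = (scsi_qla_host_t *)data;

	/* ... per-tick checks, possibly qla2xxx_wake_dpc(vha) ... */

	mod_timer(&vha->timer, jiffies + WATCH_INTERVAL * HZ);
}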
2658 /* Firmware interface routines. */ 2915 /* Firmware interface routines. */
@@ -2684,8 +2941,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2684 }; 2941 };
2685 2942
2686 struct fw_blob * 2943 struct fw_blob *
2687 qla2x00_request_firmware(scsi_qla_host_t *ha) 2944 qla2x00_request_firmware(scsi_qla_host_t *vha)
2688 { 2945 {
2946 struct qla_hw_data *ha = vha->hw;
2689 struct fw_blob *blob; 2947 struct fw_blob *blob;
2690 2948
2691 blob = NULL; 2949 blob = NULL;
@@ -2709,7 +2967,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2709 2967
2710 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 2968 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
2711 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 2969 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
2712 "(%s).\n", ha->host_no, blob->name)); 2970 "(%s).\n", vha->host_no, blob->name));
2713 blob->fw = NULL; 2971 blob->fw = NULL;
2714 blob = NULL; 2972 blob = NULL;
2715 goto out; 2973 goto out;
@@ -2754,7 +3012,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2754 int risc_paused = 0; 3012 int risc_paused = 0;
2755 uint32_t stat; 3013 uint32_t stat;
2756 unsigned long flags; 3014 unsigned long flags;
2757 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3015 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3016 struct qla_hw_data *ha = base_vha->hw;
2758 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3017 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2759 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 3018 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2760 3019
@@ -2777,7 +3036,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2777 if (risc_paused) { 3036 if (risc_paused) {
2778 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 3037 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
2779 "Dumping firmware!\n"); 3038 "Dumping firmware!\n");
2780 ha->isp_ops->fw_dump(ha, 0); 3039 ha->isp_ops->fw_dump(base_vha, 0);
2781 3040
2782 return PCI_ERS_RESULT_NEED_RESET; 3041 return PCI_ERS_RESULT_NEED_RESET;
2783 } else 3042 } else
@@ -2788,7 +3047,8 @@ static pci_ers_result_t
2788qla2xxx_pci_slot_reset(struct pci_dev *pdev) 3047qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2789 { 3048 {
2790 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3049 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
2791 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3050 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3051 struct qla_hw_data *ha = base_vha->hw;
2792 int rc; 3052 int rc;
2793 3053
2794 if (ha->mem_only) 3054 if (ha->mem_only)
@@ -2804,13 +3064,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2804 } 3064 }
2805 pci_set_master(pdev); 3065 pci_set_master(pdev);
2806 3066
2807 if (ha->isp_ops->pci_config(ha)) 3067 if (ha->isp_ops->pci_config(base_vha))
2808 return ret; 3068 return ret;
2809 3069
2810 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3070 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2811 if (qla2x00_abort_isp(ha)== QLA_SUCCESS) 3071 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
2812 ret = PCI_ERS_RESULT_RECOVERED; 3072 ret = PCI_ERS_RESULT_RECOVERED;
2813 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3073 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2814 3074
2815 return ret; 3075 return ret;
2816 } 3076 }
@@ -2818,10 +3078,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2818 static void 3078 static void
2819qla2xxx_pci_resume(struct pci_dev *pdev) 3079qla2xxx_pci_resume(struct pci_dev *pdev)
2820 { 3080 {
2821 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3081 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3082 struct qla_hw_data *ha = base_vha->hw;
2822 int ret; 3083 int ret;
2823 3084
2824 ret = qla2x00_wait_for_hba_online(ha); 3085 ret = qla2x00_wait_for_hba_online(base_vha);
2825 if (ret != QLA_SUCCESS) { 3086 if (ret != QLA_SUCCESS) {
2826 qla_printk(KERN_ERR, ha, 3087 qla_printk(KERN_ERR, ha,
2827 "the device failed to resume I/O " 3088 "the device failed to resume I/O "
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index e4af678eb2d6..c538ee1b1a31 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -10,10 +10,6 @@
10 #include <linux/vmalloc.h> 10 #include <linux/vmalloc.h>
11 #include <asm/uaccess.h> 11 #include <asm/uaccess.h>
12 12
13 static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t);
14 static void qla2x00_nv_deselect(scsi_qla_host_t *);
15 static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
16
17 /* 13 /*
18 * NVRAM support routines 14 * NVRAM support routines
19 */ 15 */
@@ -23,7 +19,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
23 * @ha: HA context 19 * @ha: HA context
24 */ 20 */
25 static void 21 static void
26 qla2x00_lock_nvram_access(scsi_qla_host_t *ha) 22 qla2x00_lock_nvram_access(struct qla_hw_data *ha)
27 { 23 {
28 uint16_t data; 24 uint16_t data;
29 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 25 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -56,7 +52,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
56 * @ha: HA context 52 * @ha: HA context
57 */ 53 */
58 static void 54 static void
59 qla2x00_unlock_nvram_access(scsi_qla_host_t *ha) 55 qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
60 { 56 {
61 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 57 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
62 58
@@ -67,6 +63,84 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
67 } 63 }
68 64
69/** 65/**
66 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
67 * @ha: HA context
68 * @data: Serial interface selector
69 */
70 static void
71 qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
72 {
73 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
74
75 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
76 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
77 NVRAM_DELAY();
78 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
79 NVR_WRT_ENABLE);
80 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
81 NVRAM_DELAY();
82 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
83 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
84 NVRAM_DELAY();
85 }
86
87 /**
88 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
89 * NVRAM.
90 * @ha: HA context
91 * @nv_cmd: NVRAM command
92 *
93 * Bit definitions for NVRAM command:
94 *
95 * Bit 26 = start bit
96 * Bit 25, 24 = opcode
97 * Bit 23-16 = address
98 * Bit 15-0 = write data
99 *
100 * Returns the word read from nvram @addr.
101 */
102 static uint16_t
103 qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
104 {
105 uint8_t cnt;
106 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
107 uint16_t data = 0;
108 uint16_t reg_data;
109
110 /* Send command to NVRAM. */
111 nv_cmd <<= 5;
112 for (cnt = 0; cnt < 11; cnt++) {
113 if (nv_cmd & BIT_31)
114 qla2x00_nv_write(ha, NVR_DATA_OUT);
115 else
116 qla2x00_nv_write(ha, 0);
117 nv_cmd <<= 1;
118 }
119
120 /* Read data from NVRAM. */
121 for (cnt = 0; cnt < 16; cnt++) {
122 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
123 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
124 NVRAM_DELAY();
125 data <<= 1;
126 reg_data = RD_REG_WORD(&reg->nvram);
127 if (reg_data & NVR_DATA_IN)
128 data |= BIT_0;
129 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
130 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
131 NVRAM_DELAY();
132 }
133
134 /* Deselect chip. */
135 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
136 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
137 NVRAM_DELAY();
138
139 return data;
140 }
141
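The command layout documented above (start bit 26, opcode in bits 25-24, address in bits 23-16) is clocked out MSB-first: nv_cmd <<= 5 aligns bit 26 with BIT_31 so the loop serializes exactly 11 bits of start, opcode and address before the 16 data bits are clocked back in. A sketch of composing a read command under that layout; the helper and the 10b read opcode are assumptions based on conventional 93Cxx serial-EEPROM commands, not taken from the driver:

/* Illustrative only: build a serial-NVRAM READ command per the bit
 * layout documented above. The 10b read opcode is an assumption. */
static uint32_t example_nv_read_cmd(uint32_t addr)
{
	return (1UL << 26) |		/* start bit */
	       (2UL << 24) |		/* opcode: read (10b, assumed) */
	       ((addr & 0xff) << 16);	/* 8-bit word address */
}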
142
143 /**
70 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the 144 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
71 * request routine to get the word from NVRAM. 145 * request routine to get the word from NVRAM.
72 * @ha: HA context 146 * @ha: HA context
@@ -75,7 +149,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
75 * Returns the word read from nvram @addr. 149 * Returns the word read from nvram @addr.
76 */ 150 */
77 static uint16_t 151 static uint16_t
78 qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr) 152 qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
79 { 153 {
80 uint16_t data; 154 uint16_t data;
81 uint32_t nv_cmd; 155 uint32_t nv_cmd;
@@ -88,13 +162,27 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
88 } 162 }
89 163
90 /** 164 /**
165 * qla2x00_nv_deselect() - Deselect NVRAM operations.
166 * @ha: HA context
167 */
168 static void
169 qla2x00_nv_deselect(struct qla_hw_data *ha)
170 {
171 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
172
173 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
174 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
175 NVRAM_DELAY();
176 }
177
178 /**
91 * qla2x00_write_nvram_word() - Write NVRAM data. 179 * qla2x00_write_nvram_word() - Write NVRAM data.
92 * @ha: HA context 180 * @ha: HA context
93 * @addr: Address in NVRAM to write 181 * @addr: Address in NVRAM to write
94 * @data: word to program 182 * @data: word to program
95 */ 183 */
96 static void 184 static void
97 qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data) 185 qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
98 { 186 {
99 int count; 187 int count;
100 uint16_t word; 188 uint16_t word;
@@ -132,7 +220,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
132 do { 220 do {
133 if (!--wait_cnt) { 221 if (!--wait_cnt) {
134 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
135 __func__, ha->host_no)); 223 __func__, vha->host_no));
136 break; 224 break;
137 } 225 }
138 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -150,8 +238,8 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
150 } 238 }
151 239
152 static int 240 static int
153 qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data, 241 qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
154 uint32_t tmo) 242 uint16_t data, uint32_t tmo)
155 { 243 {
156 int ret, count; 244 int ret, count;
157 uint16_t word; 245 uint16_t word;
@@ -209,102 +297,11 @@ qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
209 } 297 }
210 298
211 /** 299 /**
212 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
213 * NVRAM.
214 * @ha: HA context
215 * @nv_cmd: NVRAM command
216 *
217 * Bit definitions for NVRAM command:
218 *
219 * Bit 26 = start bit
220 * Bit 25, 24 = opcode
221 * Bit 23-16 = address
222 * Bit 15-0 = write data
223 *
224 * Returns the word read from nvram @addr.
225 */
226 static uint16_t
227 qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
228 {
229 uint8_t cnt;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 uint16_t data = 0;
232 uint16_t reg_data;
233
234 /* Send command to NVRAM. */
235 nv_cmd <<= 5;
236 for (cnt = 0; cnt < 11; cnt++) {
237 if (nv_cmd & BIT_31)
238 qla2x00_nv_write(ha, NVR_DATA_OUT);
239 else
240 qla2x00_nv_write(ha, 0);
241 nv_cmd <<= 1;
242 }
243
244 /* Read data from NVRAM. */
245 for (cnt = 0; cnt < 16; cnt++) {
246 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
247 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
248 NVRAM_DELAY();
249 data <<= 1;
250 reg_data = RD_REG_WORD(&reg->nvram);
251 if (reg_data & NVR_DATA_IN)
252 data |= BIT_0;
253 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
254 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
255 NVRAM_DELAY();
256 }
257
258 /* Deselect chip. */
259 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
260 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
261 NVRAM_DELAY();
262
263 return (data);
264 }
265
266 /**
267 * qla2x00_nv_write() - Clean NVRAM operations.
268 * @ha: HA context
269 */
270 static void
271 qla2x00_nv_deselect(scsi_qla_host_t *ha)
272 {
273 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
274
275 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
276 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
277 NVRAM_DELAY();
278 }
279
280 /**
281 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
282 * @ha: HA context
283 * @data: Serial interface selector
284 */
285 static void
286 qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
287 {
288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
289
290 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
291 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
292 NVRAM_DELAY();
293 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT| NVR_CLOCK |
294 NVR_WRT_ENABLE);
295 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
296 NVRAM_DELAY();
297 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
298 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
299 NVRAM_DELAY();
300 }
301
302 /**
303 * qla2x00_clear_nvram_protection() - 300 * qla2x00_clear_nvram_protection() -
304 * @ha: HA context 301 * @ha: HA context
305 */ 302 */
306 static int 303 static int
307 qla2x00_clear_nvram_protection(scsi_qla_host_t *ha) 304 qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308 { 305 {
309 int ret, stat; 306 int ret, stat;
310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 307 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -352,9 +349,8 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
352 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
353 do { 350 do {
354 if (!--wait_cnt) { 351 if (!--wait_cnt) {
355 DEBUG9_10(printk("%s(%ld): NVRAM didn't go " 352 DEBUG9_10(qla_printk(
356 "ready...\n", __func__, 353 "NVRAM didn't go ready...\n"));
357 ha->host_no));
358 break; 354 break;
359 } 355 }
360 NVRAM_DELAY(); 356 NVRAM_DELAY();
@@ -370,7 +366,7 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
370 } 366 }
371 367
372 static void 368 static void
373 qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat) 369 qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
374 { 370 {
375 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 371 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
376 uint32_t word, wait_cnt; 372 uint32_t word, wait_cnt;
@@ -412,8 +408,7 @@ qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
412 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
413 do { 409 do {
414 if (!--wait_cnt) { 410 if (!--wait_cnt) {
415 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
416 __func__, ha->host_no));
417 break; 412 break;
418 } 413 }
419 NVRAM_DELAY(); 414 NVRAM_DELAY();
@@ -454,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
454 } 449 }
455 450
456 static uint32_t 451 static uint32_t
457 qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) 452 qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
458 { 453 {
459 int rval; 454 int rval;
460 uint32_t cnt, data; 455 uint32_t cnt, data;
@@ -482,21 +477,20 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
482 } 477 }
483 478
484 uint32_t * 479 uint32_t *
485 qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 480 qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
486 uint32_t dwords) 481 uint32_t dwords)
487 { 482 {
488 uint32_t i; 483 uint32_t i;
489
490 /* Dword reads to flash. */ 484 /* Dword reads to flash. */
491 for (i = 0; i < dwords; i++, faddr++) 485 for (i = 0; i < dwords; i++, faddr++)
492 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 486 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
493 flash_data_to_access_addr(faddr))); 487 flash_data_to_access_addr(faddr)));
494 488
495 return dwptr; 489 return dwptr;
496 } 490 }
497 491
498 static int 492 static int
499 qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) 493 qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
500 { 494 {
501 int rval; 495 int rval;
502 uint32_t cnt; 496 uint32_t cnt;
@@ -519,7 +513,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
519 } 513 }
520 514
521 static void 515 static void
522 qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 516 qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
523 uint8_t *flash_id) 517 uint8_t *flash_id)
524 { 518 {
525 uint32_t ids; 519 uint32_t ids;
@@ -544,7 +538,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
544 } 538 }
545 539
546 static int 540 static int
547 qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start) 541 qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
548 { 542 {
549 const char *loc, *locations[] = { "DEF", "PCI" }; 543 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids; 544 uint32_t pcihdr, pcids;
@@ -552,6 +546,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
552 uint8_t *buf, *bcode, last_image; 546 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr; 547 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl; 548 struct qla_flt_location *fltl;
549 struct qla_hw_data *ha = vha->hw;
550 struct req_que *req = ha->req_q_map[0];
555 551
556 /* 552 /*
557 * FLT-location structure resides after the last PCI region. 553 * FLT-location structure resides after the last PCI region.
@@ -563,20 +559,20 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
563 FA_FLASH_LAYOUT_ADDR; 559 FA_FLASH_LAYOUT_ADDR;
564 560
565 /* Begin with first PCI expansion ROM header. */ 561 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring; 562 buf = (uint8_t *)req->ring;
567 dcode = (uint32_t *)ha->request_ring; 563 dcode = (uint32_t *)req->ring;
568 pcihdr = 0; 564 pcihdr = 0;
569 last_image = 1; 565 last_image = 1;
570 do { 566 do {
571 /* Verify PCI expansion ROM header. */ 567 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 568 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4); 569 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) 570 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end; 571 goto end;
576 572
577 /* Locate PCI data structure. */ 573 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 574 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 575 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4); 576 bcode = buf + (pcihdr % 4);
581 577
582 /* Validate signature of PCI data structure. */ 578 /* Validate signature of PCI data structure. */
@@ -591,14 +587,14 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
591 } while (!last_image); 587 } while (!last_image);
592 588
593 /* Now verify FLT-location structure. */ 589 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring; 590 fltl = (struct qla_flt_location *)req->ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 591 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2); 592 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' || 593 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T') 594 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end; 595 goto end;
600 596
601 wptr = (uint16_t *)ha->request_ring; 597 wptr = (uint16_t *)req->ring;
602 cnt = sizeof(struct qla_flt_location) >> 1; 598 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--) 599 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++); 600 chksum += le16_to_cpu(*wptr++);
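The FLT-location structure is accepted only when its 16-bit words sum to zero: the loop above accumulates the sum and the (elided) check rejects a nonzero result. Restated as a standalone predicate, assuming the elided check is a plain zero test (illustrative helper, not driver code):

/* Illustrative zero-sum check matching the loop above: valid when all
 * little-endian 16-bit words of the structure sum to 0 mod 2^16. */
static int example_flt_csum_ok(const uint16_t *wptr, unsigned int words)
{
	uint16_t chksum = 0;

	while (words--)
		chksum += le16_to_cpu(*wptr++);
	return chksum == 0;
}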
@@ -619,7 +615,7 @@ end:
619 } 615 }
620 616
621 static void 617 static void
622 qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr) 618 qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
623 { 619 {
624 const char *loc, *locations[] = { "DEF", "FLT" }; 620 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr; 621 uint16_t *wptr;
@@ -627,12 +623,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
627 uint32_t start; 623 uint32_t start;
628 struct qla_flt_header *flt; 624 struct qla_flt_header *flt;
629 struct qla_flt_region *region; 625 struct qla_flt_region *region;
626 struct qla_hw_data *ha = vha->hw;
627 struct req_que *req = ha->req_q_map[0];
630 628
631 ha->flt_region_flt = flt_addr; 629 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring; 630 wptr = (uint16_t *)req->ring;
633 flt = (struct qla_flt_header *)ha->request_ring; 631 flt = (struct qla_flt_header *)req->ring;
634 region = (struct qla_flt_region *)&flt[1]; 632 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 633 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
636 flt_addr << 2, OPTROM_BURST_SIZE); 634 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff)) 635 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data; 636 goto no_flash_data;
@@ -720,7 +718,7 @@ done:
720 } 718 }
721 719
722 static void 720 static void
723 qla2xxx_get_fdt_info(scsi_qla_host_t *ha) 721 qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
724 { 722 {
725 #define FLASH_BLK_SIZE_4K 0x1000 723 #define FLASH_BLK_SIZE_4K 0x1000
726 #define FLASH_BLK_SIZE_32K 0x8000 724 #define FLASH_BLK_SIZE_32K 0x8000
@@ -731,10 +729,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
731 struct qla_fdt_layout *fdt; 729 struct qla_fdt_layout *fdt;
732 uint8_t man_id, flash_id; 730 uint8_t man_id, flash_id;
733 uint16_t mid, fid; 731 uint16_t mid, fid;
732 struct qla_hw_data *ha = vha->hw;
733 struct req_que *req = ha->req_q_map[0];
734 734
735 wptr = (uint16_t *)ha->request_ring; 735 wptr = (uint16_t *)req->ring;
736 fdt = (struct qla_fdt_layout *)ha->request_ring; 736 fdt = (struct qla_fdt_layout *)req->ring;
737 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 737 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); 738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
739 if (*wptr == __constant_cpu_to_le16(0xffff)) 739 if (*wptr == __constant_cpu_to_le16(0xffff))
740 goto no_flash_data; 740 goto no_flash_data;
@@ -807,38 +807,41 @@ done:
807 } 807 }
808 808
809 int 809 int
810 qla2xxx_get_flash_info(scsi_qla_host_t *ha) 810 qla2xxx_get_flash_info(scsi_qla_host_t *vha)
811 { 811 {
812 int ret; 812 int ret;
813 uint32_t flt_addr; 813 uint32_t flt_addr;
814 struct qla_hw_data *ha = vha->hw;
814 815
815 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
816 return QLA_SUCCESS; 817 return QLA_SUCCESS;
817 818
818 ret = qla2xxx_find_flt_start(ha, &flt_addr); 819 ret = qla2xxx_find_flt_start(vha, &flt_addr);
819 if (ret != QLA_SUCCESS) 820 if (ret != QLA_SUCCESS)
820 return ret; 821 return ret;
821 822
822 qla2xxx_get_flt_info(ha, flt_addr); 823 qla2xxx_get_flt_info(vha, flt_addr);
823 qla2xxx_get_fdt_info(ha); 824 qla2xxx_get_fdt_info(vha);
824 825
825 return QLA_SUCCESS; 826 return QLA_SUCCESS;
826 } 827 }
827 828
828 void 829 void
829 qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha) 830 qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
830 { 831 {
831 #define NPIV_CONFIG_SIZE (16*1024) 832 #define NPIV_CONFIG_SIZE (16*1024)
832 void *data; 833 void *data;
833 uint16_t *wptr; 834 uint16_t *wptr;
834 uint16_t cnt, chksum; 835 uint16_t cnt, chksum;
836 int i;
835 struct qla_npiv_header hdr; 837 struct qla_npiv_header hdr;
836 struct qla_npiv_entry *entry; 838 struct qla_npiv_entry *entry;
839 struct qla_hw_data *ha = vha->hw;
837 840
838 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 841 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
839 return; 842 return;
840 843
841 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr, 844 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
842 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 845 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
843 if (hdr.version == __constant_cpu_to_le16(0xffff)) 846 if (hdr.version == __constant_cpu_to_le16(0xffff))
844 return; 847 return;
@@ -857,7 +860,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
857 return; 860 return;
858 } 861 }
859 862
860 ha->isp_ops->read_optrom(ha, (uint8_t *)data, 863 ha->isp_ops->read_optrom(vha, (uint8_t *)data,
861 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); 864 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
862 865
863 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) * 866 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
@@ -874,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
874 877
875 entry = data + sizeof(struct qla_npiv_header); 878 entry = data + sizeof(struct qla_npiv_header);
876 cnt = le16_to_cpu(hdr.entries); 879 cnt = le16_to_cpu(hdr.entries);
877 for ( ; cnt; cnt--, entry++) { 880 for (i = 0; cnt; cnt--, entry++, i++) {
878 uint16_t flags; 881 uint16_t flags;
879 struct fc_vport_identifiers vid; 882 struct fc_vport_identifiers vid;
880 struct fc_vport *vport; 883 struct fc_vport *vport;
@@ -892,25 +895,29 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
892 vid.port_name = wwn_to_u64(entry->port_name); 895 vid.port_name = wwn_to_u64(entry->port_name);
893 vid.node_name = wwn_to_u64(entry->node_name); 896 vid.node_name = wwn_to_u64(entry->node_name);
894 897
898 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
899
895 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " 900 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
896 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt, 901 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
897 (unsigned long long)vid.port_name, 902 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
898 (unsigned long long)vid.node_name, 903 entry->q_qos, entry->f_qos));
899 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos))); 904
900 905 if (i < QLA_PRECONFIG_VPORTS) {
901 vport = fc_vport_create(ha->host, 0, &vid); 906 vport = fc_vport_create(vha->host, 0, &vid);
902 if (!vport) 907 if (!vport)
903 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to " 908 qla_printk(KERN_INFO, ha,
904 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, 909 "NPIV-Config: Failed to create vport [%02x]: "
905 (unsigned long long)vid.port_name, 910 "wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.node_name); 911 vid.port_name, vid.node_name);
912 }
907 } 913 }
908 done: 914 done:
909 kfree(data); 915 kfree(data);
916 ha->npiv_info = NULL;
910 } 917 }
911 918
912 static void 919 static void
913 qla24xx_unprotect_flash(scsi_qla_host_t *ha) 920 qla24xx_unprotect_flash(struct qla_hw_data *ha)
914 { 921 {
915 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
916 923
@@ -929,7 +936,7 @@ qla24xx_unprotect_flash(scsi_qla_host_t *ha)
929 } 936 }
930 937
931 static void 938 static void
932 qla24xx_protect_flash(scsi_qla_host_t *ha) 939 qla24xx_protect_flash(struct qla_hw_data *ha)
933 { 940 {
934 uint32_t cnt; 941 uint32_t cnt;
935 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 942 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -955,7 +962,7 @@ skip_wrt_protect:
955 } 962 }
956 963
957 static int 964 static int
958 qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 965 qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
959 uint32_t dwords) 966 uint32_t dwords)
960 { 967 {
961 int ret; 968 int ret;
@@ -965,6 +972,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
965 dma_addr_t optrom_dma; 972 dma_addr_t optrom_dma;
966 void *optrom = NULL; 973 void *optrom = NULL;
967 uint32_t *s, *d; 974 uint32_t *s, *d;
975 struct qla_hw_data *ha = vha->hw;
968 976
969 ret = QLA_SUCCESS; 977 ret = QLA_SUCCESS;
970 978
@@ -1002,9 +1010,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1002 (fdata & 0xff00) |((fdata << 16) & 1010 (fdata & 0xff00) |((fdata << 16) &
1003 0xff0000) | ((fdata >> 16) & 0xff)); 1011 0xff0000) | ((fdata >> 16) & 0xff));
1004 if (ret != QLA_SUCCESS) { 1012 if (ret != QLA_SUCCESS) {
1005 DEBUG9(printk("%s(%ld) Unable to flash " 1013 DEBUG9(qla_printk("Unable to flash sector: "
1006 "sector: address=%x.\n", __func__, 1014 "address=%x.\n", faddr));
1007 ha->host_no, faddr));
1008 break; 1015 break;
1009 } 1016 }
1010 } 1017 }
@@ -1016,7 +1023,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1016 miter < OPTROM_BURST_DWORDS; miter++, s++, d++) 1023 miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
1017 *s = cpu_to_le32(*d); 1024 *s = cpu_to_le32(*d);
1018 1025
1019 ret = qla2x00_load_ram(ha, optrom_dma, 1026 ret = qla2x00_load_ram(vha, optrom_dma,
1020 flash_data_to_access_addr(faddr), 1027 flash_data_to_access_addr(faddr),
1021 OPTROM_BURST_DWORDS); 1028 OPTROM_BURST_DWORDS);
1022 if (ret != QLA_SUCCESS) { 1029 if (ret != QLA_SUCCESS) {
@@ -1044,7 +1051,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1044 if (ret != QLA_SUCCESS) { 1051 if (ret != QLA_SUCCESS) {
1045 DEBUG9(printk("%s(%ld) Unable to program flash " 1052 DEBUG9(printk("%s(%ld) Unable to program flash "
1046 "address=%x data=%x.\n", __func__, 1053 "address=%x data=%x.\n", __func__,
1047 ha->host_no, faddr, *dwptr)); 1054 vha->host_no, faddr, *dwptr));
1048 break; 1055 break;
1049 } 1056 }
1050 1057
@@ -1067,11 +1074,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1067 } 1074 }
1068 1075
1069 uint8_t * 1076 uint8_t *
1070 qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1077 qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1071 uint32_t bytes) 1078 uint32_t bytes)
1072 { 1079 {
1073 uint32_t i; 1080 uint32_t i;
1074 uint16_t *wptr; 1081 uint16_t *wptr;
1082 struct qla_hw_data *ha = vha->hw;
1075 1083
1076 /* Word reads to NVRAM via registers. */ 1084 /* Word reads to NVRAM via registers. */
1077 wptr = (uint16_t *)buf; 1085 wptr = (uint16_t *)buf;
@@ -1085,7 +1093,7 @@ qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1085 } 1093 }
1086 1094
1087 uint8_t * 1095 uint8_t *
1088 qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1096 qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1089 uint32_t bytes) 1097 uint32_t bytes)
1090 { 1098 {
1091 uint32_t i; 1099 uint32_t i;
@@ -1094,20 +1102,21 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1094 /* Dword reads to flash. */ 1102 /* Dword reads to flash. */
1095 dwptr = (uint32_t *)buf; 1103 dwptr = (uint32_t *)buf;
1096 for (i = 0; i < bytes >> 2; i++, naddr++) 1104 for (i = 0; i < bytes >> 2; i++, naddr++)
1097 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1105 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
1098 nvram_data_to_access_addr(naddr))); 1106 nvram_data_to_access_addr(naddr)));
1099 1107
1100 return buf; 1108 return buf;
1101 } 1109 }
1102 1110
1103 int 1111 int
1104 qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1112 qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1105 uint32_t bytes) 1113 uint32_t bytes)
1106 { 1114 {
1107 int ret, stat; 1115 int ret, stat;
1108 uint32_t i; 1116 uint32_t i;
1109 uint16_t *wptr; 1117 uint16_t *wptr;
1110 unsigned long flags; 1118 unsigned long flags;
1119 struct qla_hw_data *ha = vha->hw;
1111 1120
1112 ret = QLA_SUCCESS; 1121 ret = QLA_SUCCESS;
1113 1122
@@ -1134,12 +1143,13 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1134 } 1143 }
1135 1144
1136 int 1145 int
1137 qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1146 qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1138 uint32_t bytes) 1147 uint32_t bytes)
1139 { 1148 {
1140 int ret; 1149 int ret;
1141 uint32_t i; 1150 uint32_t i;
1142 uint32_t *dwptr; 1151 uint32_t *dwptr;
1152 struct qla_hw_data *ha = vha->hw;
1143 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1153 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1144 1154
1145 ret = QLA_SUCCESS; 1155 ret = QLA_SUCCESS;
@@ -1162,9 +1172,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1162 nvram_data_to_access_addr(naddr), 1172 nvram_data_to_access_addr(naddr),
1163 cpu_to_le32(*dwptr)); 1173 cpu_to_le32(*dwptr));
1164 if (ret != QLA_SUCCESS) { 1174 if (ret != QLA_SUCCESS) {
1165 DEBUG9(printk("%s(%ld) Unable to program " 1175 DEBUG9(qla_printk("Unable to program nvram address=%x "
1166 "nvram address=%x data=%x.\n", __func__, 1176 "data=%x.\n", naddr, *dwptr));
1167 ha->host_no, naddr, *dwptr));
1168 break; 1177 break;
1169 } 1178 }
1170 } 1179 }
@@ -1182,11 +1191,12 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1182 } 1191 }
1183 1192
1184 uint8_t * 1193 uint8_t *
1185 qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1194 qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1186 uint32_t bytes) 1195 uint32_t bytes)
1187 { 1196 {
1188 uint32_t i; 1197 uint32_t i;
1189 uint32_t *dwptr; 1198 uint32_t *dwptr;
1199 struct qla_hw_data *ha = vha->hw;
1190 1200
1191 /* Dword reads to flash. */ 1201 /* Dword reads to flash. */
1192 dwptr = (uint32_t *)buf; 1202 dwptr = (uint32_t *)buf;
@@ -1199,19 +1209,20 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1199 } 1209 }
1200 1210
1201 int 1211 int
1202 qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1212 qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1203 uint32_t bytes) 1213 uint32_t bytes)
1204 { 1214 {
1215 struct qla_hw_data *ha = vha->hw;
1205 #define RMW_BUFFER_SIZE (64 * 1024) 1216 #define RMW_BUFFER_SIZE (64 * 1024)
1206 uint8_t *dbuf; 1217 uint8_t *dbuf;
1207 1218
1208 dbuf = vmalloc(RMW_BUFFER_SIZE); 1219 dbuf = vmalloc(RMW_BUFFER_SIZE);
1209 if (!dbuf) 1220 if (!dbuf)
1210 return QLA_MEMORY_ALLOC_FAILED; 1221 return QLA_MEMORY_ALLOC_FAILED;
1211 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1222 ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1212 RMW_BUFFER_SIZE); 1223 RMW_BUFFER_SIZE);
1213 memcpy(dbuf + (naddr << 2), buf, bytes); 1224 memcpy(dbuf + (naddr << 2), buf, bytes);
1214 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1225 ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1215 RMW_BUFFER_SIZE); 1226 RMW_BUFFER_SIZE);
1216 vfree(dbuf); 1227 vfree(dbuf);
1217 1228
@@ -1219,7 +1230,7 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1219 } 1230 }
1220 1231
1221 static inline void 1232 static inline void
1222 qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1233 qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1223 { 1234 {
1224 if (IS_QLA2322(ha)) { 1235 if (IS_QLA2322(ha)) {
1225 /* Flip all colors. */ 1236 /* Flip all colors. */
@@ -1249,12 +1260,13 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1249#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) 1260#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
1250 1261
1251void 1262void
1252qla2x00_beacon_blink(struct scsi_qla_host *ha) 1263qla2x00_beacon_blink(struct scsi_qla_host *vha)
1253{ 1264{
1254 uint16_t gpio_enable; 1265 uint16_t gpio_enable;
1255 uint16_t gpio_data; 1266 uint16_t gpio_data;
1256 uint16_t led_color = 0; 1267 uint16_t led_color = 0;
1257 unsigned long flags; 1268 unsigned long flags;
1269 struct qla_hw_data *ha = vha->hw;
1258 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1270 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1259 1271
1260 spin_lock_irqsave(&ha->hardware_lock, flags); 1272 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1298,17 +1310,18 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
1298} 1310}
1299 1311
1300int 1312int
1301qla2x00_beacon_on(struct scsi_qla_host *ha) 1313qla2x00_beacon_on(struct scsi_qla_host *vha)
1302{ 1314{
1303 uint16_t gpio_enable; 1315 uint16_t gpio_enable;
1304 uint16_t gpio_data; 1316 uint16_t gpio_data;
1305 unsigned long flags; 1317 unsigned long flags;
1318 struct qla_hw_data *ha = vha->hw;
1306 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1307 1320
1308 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1321 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1309 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1322 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1310 1323
1311 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1324 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1312 qla_printk(KERN_WARNING, ha, 1325 qla_printk(KERN_WARNING, ha,
1313 "Unable to update fw options (beacon on).\n"); 1326 "Unable to update fw options (beacon on).\n");
1314 return QLA_FUNCTION_FAILED; 1327 return QLA_FUNCTION_FAILED;
@@ -1354,9 +1367,10 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
1354} 1367}
1355 1368
1356int 1369int
1357qla2x00_beacon_off(struct scsi_qla_host *ha) 1370qla2x00_beacon_off(struct scsi_qla_host *vha)
1358{ 1371{
1359 int rval = QLA_SUCCESS; 1372 int rval = QLA_SUCCESS;
1373 struct qla_hw_data *ha = vha->hw;
1360 1374
1361 ha->beacon_blink_led = 0; 1375 ha->beacon_blink_led = 0;
1362 1376
@@ -1366,12 +1380,12 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1366 else 1380 else
1367 ha->beacon_color_state = QLA_LED_GRN_ON; 1381 ha->beacon_color_state = QLA_LED_GRN_ON;
1368 1382
1369 ha->isp_ops->beacon_blink(ha); /* This turns green LED off */ 1383 ha->isp_ops->beacon_blink(vha); /* This turns green LED off */
1370 1384
1371 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1385 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1372 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 1386 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
1373 1387
1374 rval = qla2x00_set_fw_options(ha, ha->fw_options); 1388 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1375 if (rval != QLA_SUCCESS) 1389 if (rval != QLA_SUCCESS)
1376 qla_printk(KERN_WARNING, ha, 1390 qla_printk(KERN_WARNING, ha,
1377 "Unable to update fw options (beacon off).\n"); 1391 "Unable to update fw options (beacon off).\n");
@@ -1380,7 +1394,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1380 1394
1381 1395
1382static inline void 1396static inline void
1383qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1397qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1384{ 1398{
1385 /* Flip all colors. */ 1399 /* Flip all colors. */
1386 if (ha->beacon_color_state == QLA_LED_ALL_ON) { 1400 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
@@ -1395,11 +1409,12 @@ qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1395} 1409}
1396 1410
1397void 1411void
1398qla24xx_beacon_blink(struct scsi_qla_host *ha) 1412qla24xx_beacon_blink(struct scsi_qla_host *vha)
1399{ 1413{
1400 uint16_t led_color = 0; 1414 uint16_t led_color = 0;
1401 uint32_t gpio_data; 1415 uint32_t gpio_data;
1402 unsigned long flags; 1416 unsigned long flags;
1417 struct qla_hw_data *ha = vha->hw;
1403 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1418 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1404 1419
1405 /* Save the Original GPIOD. */ 1420 /* Save the Original GPIOD. */
@@ -1428,20 +1443,21 @@ qla24xx_beacon_blink(struct scsi_qla_host *ha)
1428} 1443}
1429 1444
1430int 1445int
1431qla24xx_beacon_on(struct scsi_qla_host *ha) 1446qla24xx_beacon_on(struct scsi_qla_host *vha)
1432{ 1447{
1433 uint32_t gpio_data; 1448 uint32_t gpio_data;
1434 unsigned long flags; 1449 unsigned long flags;
1450 struct qla_hw_data *ha = vha->hw;
1435 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1451 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1436 1452
1437 if (ha->beacon_blink_led == 0) { 1453 if (ha->beacon_blink_led == 0) {
1438 /* Enable firmware for update */ 1454 /* Enable firmware for update */
1439 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 1455 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
1440 1456
1441 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) 1457 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
1442 return QLA_FUNCTION_FAILED; 1458 return QLA_FUNCTION_FAILED;
1443 1459
1444 if (qla2x00_get_fw_options(ha, ha->fw_options) != 1460 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1445 QLA_SUCCESS) { 1461 QLA_SUCCESS) {
1446 qla_printk(KERN_WARNING, ha, 1462 qla_printk(KERN_WARNING, ha,
1447 "Unable to update fw options (beacon on).\n"); 1463 "Unable to update fw options (beacon on).\n");
@@ -1469,16 +1485,17 @@ qla24xx_beacon_on(struct scsi_qla_host *ha)
1469} 1485}
1470 1486
1471int 1487int
1472qla24xx_beacon_off(struct scsi_qla_host *ha) 1488qla24xx_beacon_off(struct scsi_qla_host *vha)
1473{ 1489{
1474 uint32_t gpio_data; 1490 uint32_t gpio_data;
1475 unsigned long flags; 1491 unsigned long flags;
1492 struct qla_hw_data *ha = vha->hw;
1476 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1493 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1477 1494
1478 ha->beacon_blink_led = 0; 1495 ha->beacon_blink_led = 0;
1479 ha->beacon_color_state = QLA_LED_ALL_ON; 1496 ha->beacon_color_state = QLA_LED_ALL_ON;
1480 1497
1481 ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */ 1498 ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
1482 1499
1483 /* Give control back to firmware. */ 1500 /* Give control back to firmware. */
1484 spin_lock_irqsave(&ha->hardware_lock, flags); 1501 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1492,13 +1509,13 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1492 1509
1493 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1510 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1494 1511
1495 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1512 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1496 qla_printk(KERN_WARNING, ha, 1513 qla_printk(KERN_WARNING, ha,
1497 "Unable to update fw options (beacon off).\n"); 1514 "Unable to update fw options (beacon off).\n");
1498 return QLA_FUNCTION_FAILED; 1515 return QLA_FUNCTION_FAILED;
1499 } 1516 }
1500 1517
1501 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1518 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1502 qla_printk(KERN_WARNING, ha, 1519 qla_printk(KERN_WARNING, ha,
1503 "Unable to get fw options (beacon off).\n"); 1520 "Unable to get fw options (beacon off).\n");
1504 return QLA_FUNCTION_FAILED; 1521 return QLA_FUNCTION_FAILED;
@@ -1517,7 +1534,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1517 * @ha: HA context 1534 * @ha: HA context
1518 */ 1535 */
1519static void 1536static void
1520qla2x00_flash_enable(scsi_qla_host_t *ha) 1537qla2x00_flash_enable(struct qla_hw_data *ha)
1521{ 1538{
1522 uint16_t data; 1539 uint16_t data;
1523 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1540 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1533,7 +1550,7 @@ qla2x00_flash_enable(scsi_qla_host_t *ha)
1533 * @ha: HA context 1550 * @ha: HA context
1534 */ 1551 */
1535static void 1552static void
1536qla2x00_flash_disable(scsi_qla_host_t *ha) 1553qla2x00_flash_disable(struct qla_hw_data *ha)
1537{ 1554{
1538 uint16_t data; 1555 uint16_t data;
1539 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1554,7 +1571,7 @@ qla2x00_flash_disable(scsi_qla_host_t *ha)
1554 * Returns the byte read from flash @addr. 1571 * Returns the byte read from flash @addr.
1555 */ 1572 */
1556static uint8_t 1573static uint8_t
1557qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) 1574qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
1558{ 1575{
1559 uint16_t data; 1576 uint16_t data;
1560 uint16_t bank_select; 1577 uint16_t bank_select;
@@ -1615,7 +1632,7 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1615 * @data: Data to write 1632 * @data: Data to write
1616 */ 1633 */
1617static void 1634static void
1618qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) 1635qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
1619{ 1636{
1620 uint16_t bank_select; 1637 uint16_t bank_select;
1621 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1638 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1678,7 +1695,7 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1678 * Returns 0 on success, else non-zero. 1695 * Returns 0 on success, else non-zero.
1679 */ 1696 */
1680static int 1697static int
1681qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, 1698qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
1682 uint8_t man_id, uint8_t flash_id) 1699 uint8_t man_id, uint8_t flash_id)
1683{ 1700{
1684 int status; 1701 int status;
@@ -1718,8 +1735,8 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1718 * Returns 0 on success, else non-zero. 1735 * Returns 0 on success, else non-zero.
1719 */ 1736 */
1720static int 1737static int
1721qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, 1738qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
1722 uint8_t man_id, uint8_t flash_id) 1739 uint8_t data, uint8_t man_id, uint8_t flash_id)
1723{ 1740{
1724 /* Write Program Command Sequence. */ 1741 /* Write Program Command Sequence. */
1725 if (IS_OEM_001(ha)) { 1742 if (IS_OEM_001(ha)) {
@@ -1755,7 +1772,7 @@ qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1755 * Returns 0 on success, else non-zero. 1772 * Returns 0 on success, else non-zero.
1756 */ 1773 */
1757static int 1774static int
1758qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) 1775qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
1759{ 1776{
1760 /* Individual Sector Erase Command Sequence */ 1777 /* Individual Sector Erase Command Sequence */
1761 if (IS_OEM_001(ha)) { 1778 if (IS_OEM_001(ha)) {
@@ -1791,7 +1808,7 @@ qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1791 * Returns 0 on success, else non-zero. 1808 * Returns 0 on success, else non-zero.
1792 */ 1809 */
1793static int 1810static int
1794qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, 1811qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
1795 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) 1812 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1796{ 1813{
1797 /* Individual Sector Erase Command Sequence */ 1814 /* Individual Sector Erase Command Sequence */
@@ -1817,7 +1834,7 @@ qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1817 * @flash_id: Flash ID 1834 * @flash_id: Flash ID
1818 */ 1835 */
1819static void 1836static void
1820qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 1837qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
1821 uint8_t *flash_id) 1838 uint8_t *flash_id)
1822{ 1839{
1823 qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1840 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
@@ -1831,8 +1848,8 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1831} 1848}
1832 1849
1833static void 1850static void
1834qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr, 1851qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
1835 uint32_t length) 1852 uint32_t saddr, uint32_t length)
1836{ 1853{
1837 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1854 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1838 uint32_t midpoint, ilength; 1855 uint32_t midpoint, ilength;
@@ -1856,14 +1873,15 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1856} 1873}
1857 1874
1858static inline void 1875static inline void
1859qla2x00_suspend_hba(struct scsi_qla_host *ha) 1876qla2x00_suspend_hba(struct scsi_qla_host *vha)
1860{ 1877{
1861 int cnt; 1878 int cnt;
1862 unsigned long flags; 1879 unsigned long flags;
1880 struct qla_hw_data *ha = vha->hw;
1863 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1881 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1864 1882
1865 /* Suspend HBA. */ 1883 /* Suspend HBA. */
1866 scsi_block_requests(ha->host); 1884 scsi_block_requests(vha->host);
1867 ha->isp_ops->disable_intrs(ha); 1885 ha->isp_ops->disable_intrs(ha);
1868 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1886 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1869 1887
@@ -1884,26 +1902,29 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
1884} 1902}
1885 1903
1886static inline void 1904static inline void
1887qla2x00_resume_hba(struct scsi_qla_host *ha) 1905qla2x00_resume_hba(struct scsi_qla_host *vha)
1888{ 1906{
1907 struct qla_hw_data *ha = vha->hw;
1908
1889 /* Resume HBA. */ 1909 /* Resume HBA. */
1890 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1910 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1891 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1911 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1892 qla2xxx_wake_dpc(ha); 1912 qla2xxx_wake_dpc(vha);
1893 qla2x00_wait_for_hba_online(ha); 1913 qla2x00_wait_for_hba_online(vha);
1894 scsi_unblock_requests(ha->host); 1914 scsi_unblock_requests(vha->host);
1895} 1915}
1896 1916
1897uint8_t * 1917uint8_t *
1898qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1918qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1899 uint32_t offset, uint32_t length) 1919 uint32_t offset, uint32_t length)
1900{ 1920{
1901 uint32_t addr, midpoint; 1921 uint32_t addr, midpoint;
1902 uint8_t *data; 1922 uint8_t *data;
1923 struct qla_hw_data *ha = vha->hw;
1903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1924 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1904 1925
1905 /* Suspend HBA. */ 1926 /* Suspend HBA. */
1906 qla2x00_suspend_hba(ha); 1927 qla2x00_suspend_hba(vha);
1907 1928
1908 /* Go with read. */ 1929 /* Go with read. */
1909 midpoint = ha->optrom_size / 2; 1930 midpoint = ha->optrom_size / 2;
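
qla2x00_suspend_hba()/qla2x00_resume_hba() above bracket every slow flash operation: new I/O is blocked, interrupts are disabled, and a mailbox flag is set; resume schedules a full ISP abort so the firmware re-initializes. A condensed sketch of the bracket, using only calls that appear in this patch:

	static void example_flash_update(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;

		scsi_block_requests(vha->host);		/* stop new I/O */
		ha->isp_ops->disable_intrs(ha);
		set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);

		/* ... program flash ... */

		clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); /* full re-init */
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_hba_online(vha);
		scsi_unblock_requests(vha->host);
	}
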
@@ -1922,13 +1943,13 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1922 qla2x00_flash_disable(ha); 1943 qla2x00_flash_disable(ha);
1923 1944
1924 /* Resume HBA. */ 1945 /* Resume HBA. */
1925 qla2x00_resume_hba(ha); 1946 qla2x00_resume_hba(vha);
1926 1947
1927 return buf; 1948 return buf;
1928} 1949}
1929 1950
1930int 1951int
1931qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1952qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1932 uint32_t offset, uint32_t length) 1953 uint32_t offset, uint32_t length)
1933{ 1954{
1934 1955
@@ -1936,10 +1957,11 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1936 uint8_t man_id, flash_id, sec_number, data; 1957 uint8_t man_id, flash_id, sec_number, data;
1937 uint16_t wd; 1958 uint16_t wd;
1938 uint32_t addr, liter, sec_mask, rest_addr; 1959 uint32_t addr, liter, sec_mask, rest_addr;
1960 struct qla_hw_data *ha = vha->hw;
1939 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1961 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1940 1962
1941 /* Suspend HBA. */ 1963 /* Suspend HBA. */
1942 qla2x00_suspend_hba(ha); 1964 qla2x00_suspend_hba(vha);
1943 1965
1944 rval = QLA_SUCCESS; 1966 rval = QLA_SUCCESS;
1945 sec_number = 0; 1967 sec_number = 0;
@@ -2139,55 +2161,58 @@ update_flash:
2139 qla2x00_flash_disable(ha); 2161 qla2x00_flash_disable(ha);
2140 2162
2141 /* Resume HBA. */ 2163 /* Resume HBA. */
2142 qla2x00_resume_hba(ha); 2164 qla2x00_resume_hba(vha);
2143 2165
2144 return rval; 2166 return rval;
2145} 2167}
2146 2168
2147uint8_t * 2169uint8_t *
2148qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2170qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2149 uint32_t offset, uint32_t length) 2171 uint32_t offset, uint32_t length)
2150{ 2172{
2173 struct qla_hw_data *ha = vha->hw;
2174
2151 /* Suspend HBA. */ 2175 /* Suspend HBA. */
2152 scsi_block_requests(ha->host); 2176 scsi_block_requests(vha->host);
2153 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2177 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2154 2178
2155 /* Go with read. */ 2179 /* Go with read. */
2156 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); 2180 qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
2157 2181
2158 /* Resume HBA. */ 2182 /* Resume HBA. */
2159 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2183 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2160 scsi_unblock_requests(ha->host); 2184 scsi_unblock_requests(vha->host);
2161 2185
2162 return buf; 2186 return buf;
2163} 2187}
2164 2188
2165int 2189int
2166qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2190qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2167 uint32_t offset, uint32_t length) 2191 uint32_t offset, uint32_t length)
2168{ 2192{
2169 int rval; 2193 int rval;
2194 struct qla_hw_data *ha = vha->hw;
2170 2195
2171 /* Suspend HBA. */ 2196 /* Suspend HBA. */
2172 scsi_block_requests(ha->host); 2197 scsi_block_requests(vha->host);
2173 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2198 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2174 2199
2175 /* Go with write. */ 2200 /* Go with write. */
2176 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, 2201 rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
2177 length >> 2); 2202 length >> 2);
2178 2203
2179 /* Resume HBA -- RISC reset needed. */ 2204 /* Resume HBA -- RISC reset needed. */
2180 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2205 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2181 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2206 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2182 qla2xxx_wake_dpc(ha); 2207 qla2xxx_wake_dpc(vha);
2183 qla2x00_wait_for_hba_online(ha); 2208 qla2x00_wait_for_hba_online(vha);
2184 scsi_unblock_requests(ha->host); 2209 scsi_unblock_requests(vha->host);
2185 2210
2186 return rval; 2211 return rval;
2187} 2212}
2188 2213
2189uint8_t * 2214uint8_t *
2190qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2215qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2191 uint32_t offset, uint32_t length) 2216 uint32_t offset, uint32_t length)
2192{ 2217{
2193 int rval; 2218 int rval;
@@ -2195,6 +2220,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2195 void *optrom; 2220 void *optrom;
2196 uint8_t *pbuf; 2221 uint8_t *pbuf;
2197 uint32_t faddr, left, burst; 2222 uint32_t faddr, left, burst;
2223 struct qla_hw_data *ha = vha->hw;
2198 2224
2199 if (offset & 0xfff) 2225 if (offset & 0xfff)
2200 goto slow_read; 2226 goto slow_read;
@@ -2219,7 +2245,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2219 if (burst > left) 2245 if (burst > left)
2220 burst = left; 2246 burst = left;
2221 2247
2222 rval = qla2x00_dump_ram(ha, optrom_dma, 2248 rval = qla2x00_dump_ram(vha, optrom_dma,
2223 flash_data_to_access_addr(faddr), burst); 2249 flash_data_to_access_addr(faddr), burst);
2224 if (rval) { 2250 if (rval) {
2225 qla_printk(KERN_WARNING, ha, 2251 qla_printk(KERN_WARNING, ha,
@@ -2248,7 +2274,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2248 return buf; 2274 return buf;
2249 2275
2250slow_read: 2276slow_read:
2251 return qla24xx_read_optrom_data(ha, buf, offset, length); 2277 return qla24xx_read_optrom_data(vha, buf, offset, length);
2252} 2278}
2253 2279
2254/** 2280/**
@@ -2270,7 +2296,7 @@ slow_read:
2270 * Returns QLA_SUCCESS on successful retrieval of version. 2296 * Returns QLA_SUCCESS on successful retrieval of version.
2271 */ 2297 */
2272static void 2298static void
2273qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids) 2299qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
2274{ 2300{
2275 int ret = QLA_FUNCTION_FAILED; 2301 int ret = QLA_FUNCTION_FAILED;
2276 uint32_t istart, iend, iter, vend; 2302 uint32_t istart, iend, iter, vend;
@@ -2344,13 +2370,14 @@ qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
2344} 2370}
2345 2371
2346int 2372int
2347qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2373qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2348{ 2374{
2349 int ret = QLA_SUCCESS; 2375 int ret = QLA_SUCCESS;
2350 uint8_t code_type, last_image; 2376 uint8_t code_type, last_image;
2351 uint32_t pcihdr, pcids; 2377 uint32_t pcihdr, pcids;
2352 uint8_t *dbyte; 2378 uint8_t *dbyte;
2353 uint16_t *dcode; 2379 uint16_t *dcode;
2380 struct qla_hw_data *ha = vha->hw;
2354 2381
2355 if (!ha->pio_address || !mbuf) 2382 if (!ha->pio_address || !mbuf)
2356 return QLA_FUNCTION_FAILED; 2383 return QLA_FUNCTION_FAILED;
@@ -2370,8 +2397,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2370 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2397 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2371 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2398 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2372 /* No signature */ 2399 /* No signature */
2373 DEBUG2(printk("scsi(%ld): No matching ROM " 2400 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2374 "signature.\n", ha->host_no)); 2401 "signature.\n"));
2375 ret = QLA_FUNCTION_FAILED; 2402 ret = QLA_FUNCTION_FAILED;
2376 break; 2403 break;
2377 } 2404 }
@@ -2387,8 +2414,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2387 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2414 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2388 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2415 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2389 /* Incorrect header. */ 2416 /* Incorrect header. */
2390 DEBUG2(printk("%s(): PCI data struct not found " 2417 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2391 "pcir_adr=%x.\n", __func__, pcids)); 2418 "found pcir_adr=%x.\n", pcids));
2392 ret = QLA_FUNCTION_FAILED; 2419 ret = QLA_FUNCTION_FAILED;
2393 break; 2420 break;
2394 } 2421 }
@@ -2402,7 +2429,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2402 qla2x00_read_flash_byte(ha, pcids + 0x12); 2429 qla2x00_read_flash_byte(ha, pcids + 0x12);
2403 ha->bios_revision[1] = 2430 ha->bios_revision[1] =
2404 qla2x00_read_flash_byte(ha, pcids + 0x13); 2431 qla2x00_read_flash_byte(ha, pcids + 0x13);
2405 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2432 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2406 ha->bios_revision[1], ha->bios_revision[0])); 2433 ha->bios_revision[1], ha->bios_revision[0]));
2407 break; 2434 break;
2408 case ROM_CODE_TYPE_FCODE: 2435 case ROM_CODE_TYPE_FCODE:
@@ -2416,12 +2443,12 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2416 qla2x00_read_flash_byte(ha, pcids + 0x12); 2443 qla2x00_read_flash_byte(ha, pcids + 0x12);
2417 ha->efi_revision[1] = 2444 ha->efi_revision[1] =
2418 qla2x00_read_flash_byte(ha, pcids + 0x13); 2445 qla2x00_read_flash_byte(ha, pcids + 0x13);
2419 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2446 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2420 ha->efi_revision[1], ha->efi_revision[0])); 2447 ha->efi_revision[1], ha->efi_revision[0]));
2421 break; 2448 break;
2422 default: 2449 default:
2423 DEBUG2(printk("%s(): Unrecognized code type %x at " 2450 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2424 "pcids %x.\n", __func__, code_type, pcids)); 2451 "type %x at pcids %x.\n", code_type, pcids));
2425 break; 2452 break;
2426 } 2453 }
2427 2454
@@ -2441,16 +2468,16 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2441 2468
2442 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2469 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2443 8); 2470 8);
2444 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2471 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
2445 __func__, ha->host_no)); 2472 "flash:\n"));
2446 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2473 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
2447 2474
2448 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2475 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2449 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2476 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2450 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2477 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2451 dcode[3] == 0)) { 2478 dcode[3] == 0)) {
2452 DEBUG2(printk("%s(): Unrecognized fw revision at " 2479 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2453 "%x.\n", __func__, ha->flt_region_fw * 4)); 2480 "revision at %x.\n", ha->flt_region_fw * 4));
2454 } else { 2481 } else {
2455 /* values are in big endian */ 2482 /* values are in big endian */
2456 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2483 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2465,7 +2492,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2465} 2492}
2466 2493
2467int 2494int
2468qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2495qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2469{ 2496{
2470 int ret = QLA_SUCCESS; 2497 int ret = QLA_SUCCESS;
2471 uint32_t pcihdr, pcids; 2498 uint32_t pcihdr, pcids;
@@ -2473,6 +2500,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2473 uint8_t *bcode; 2500 uint8_t *bcode;
2474 uint8_t code_type, last_image; 2501 uint8_t code_type, last_image;
2475 int i; 2502 int i;
2503 struct qla_hw_data *ha = vha->hw;
2476 2504
2477 if (!mbuf) 2505 if (!mbuf)
2478 return QLA_FUNCTION_FAILED; 2506 return QLA_FUNCTION_FAILED;
@@ -2489,12 +2517,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2489 last_image = 1; 2517 last_image = 1;
2490 do { 2518 do {
2491 /* Verify PCI expansion ROM header. */ 2519 /* Verify PCI expansion ROM header. */
2492 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 2520 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
2493 bcode = mbuf + (pcihdr % 4); 2521 bcode = mbuf + (pcihdr % 4);
2494 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2522 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2495 /* No signature */ 2523 /* No signature */
2496 DEBUG2(printk("scsi(%ld): No matching ROM " 2524 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2497 "signature.\n", ha->host_no)); 2525 "signature.\n"));
2498 ret = QLA_FUNCTION_FAILED; 2526 ret = QLA_FUNCTION_FAILED;
2499 break; 2527 break;
2500 } 2528 }
@@ -2502,15 +2530,15 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2502 /* Locate PCI data structure. */ 2530 /* Locate PCI data structure. */
2503 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 2531 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
2504 2532
2505 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 2533 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
2506 bcode = mbuf + (pcihdr % 4); 2534 bcode = mbuf + (pcihdr % 4);
2507 2535
2508 /* Validate signature of PCI data structure. */ 2536 /* Validate signature of PCI data structure. */
2509 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2537 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2510 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2538 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2511 /* Incorrect header. */ 2539 /* Incorrect header. */
2512 DEBUG2(printk("%s(): PCI data struct not found " 2540 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2513 "pcir_adr=%x.\n", __func__, pcids)); 2541 "found pcir_adr=%x.\n", pcids));
2514 ret = QLA_FUNCTION_FAILED; 2542 ret = QLA_FUNCTION_FAILED;
2515 break; 2543 break;
2516 } 2544 }
@@ -2522,26 +2550,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2522 /* Intel x86, PC-AT compatible. */ 2550 /* Intel x86, PC-AT compatible. */
2523 ha->bios_revision[0] = bcode[0x12]; 2551 ha->bios_revision[0] = bcode[0x12];
2524 ha->bios_revision[1] = bcode[0x13]; 2552 ha->bios_revision[1] = bcode[0x13];
2525 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2553 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2526 ha->bios_revision[1], ha->bios_revision[0])); 2554 ha->bios_revision[1], ha->bios_revision[0]));
2527 break; 2555 break;
2528 case ROM_CODE_TYPE_FCODE: 2556 case ROM_CODE_TYPE_FCODE:
2529 /* Open Firmware standard for PCI (FCode). */ 2557 /* Open Firmware standard for PCI (FCode). */
2530 ha->fcode_revision[0] = bcode[0x12]; 2558 ha->fcode_revision[0] = bcode[0x12];
2531 ha->fcode_revision[1] = bcode[0x13]; 2559 ha->fcode_revision[1] = bcode[0x13];
2532 DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__, 2560 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
2533 ha->fcode_revision[1], ha->fcode_revision[0])); 2561 ha->fcode_revision[1], ha->fcode_revision[0]));
2534 break; 2562 break;
2535 case ROM_CODE_TYPE_EFI: 2563 case ROM_CODE_TYPE_EFI:
2536 /* Extensible Firmware Interface (EFI). */ 2564 /* Extensible Firmware Interface (EFI). */
2537 ha->efi_revision[0] = bcode[0x12]; 2565 ha->efi_revision[0] = bcode[0x12];
2538 ha->efi_revision[1] = bcode[0x13]; 2566 ha->efi_revision[1] = bcode[0x13];
2539 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2567 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2540 ha->efi_revision[1], ha->efi_revision[0])); 2568 ha->efi_revision[1], ha->efi_revision[0]));
2541 break; 2569 break;
2542 default: 2570 default:
2543 DEBUG2(printk("%s(): Unrecognized code type %x at " 2571 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2544 "pcids %x.\n", __func__, code_type, pcids)); 2572 "type %x at pcids %x.\n", code_type, pcids));
2545 break; 2573 break;
2546 } 2574 }
2547 2575
@@ -2555,7 +2583,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2555 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2583 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2556 dcode = mbuf; 2584 dcode = mbuf;
2557 2585
2558 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4); 2586 qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
2559 for (i = 0; i < 4; i++) 2587 for (i = 0; i < 4; i++)
2560 dcode[i] = be32_to_cpu(dcode[i]); 2588 dcode[i] = be32_to_cpu(dcode[i]);
2561 2589
@@ -2563,8 +2591,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2563 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2591 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2564 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2592 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2565 dcode[3] == 0)) { 2593 dcode[3] == 0)) {
2566 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2594 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2567 __func__, ha->flt_region_fw)); 2595 "revision at %x.\n", ha->flt_region_fw * 4));
2568 } else { 2596 } else {
2569 ha->fw_revision[0] = dcode[0]; 2597 ha->fw_revision[0] = dcode[0];
2570 ha->fw_revision[1] = dcode[1]; 2598 ha->fw_revision[1] = dcode[1];
@@ -2593,8 +2621,9 @@ qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
2593} 2621}
2594 2622
2595int 2623int
2596qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size) 2624qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2597{ 2625{
2626 struct qla_hw_data *ha = vha->hw;
2598 uint8_t *pos = ha->vpd; 2627 uint8_t *pos = ha->vpd;
2599 uint8_t *end = pos + ha->vpd_size; 2628 uint8_t *end = pos + ha->vpd_size;
2600 int len = 0; 2629 int len = 0;
@@ -2621,9 +2650,10 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
2621} 2650}
2622 2651
2623static int 2652static int
2624qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) 2653qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
2625{ 2654{
2626 uint32_t d[2], faddr; 2655 uint32_t d[2], faddr;
2656 struct qla_hw_data *ha = vha->hw;
2627 2657
2628 /* Locate first empty entry. */ 2658 /* Locate first empty entry. */
2629 for (;;) { 2659 for (;;) {
@@ -2634,7 +2664,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2634 return QLA_MEMORY_ALLOC_FAILED; 2664 return QLA_MEMORY_ALLOC_FAILED;
2635 } 2665 }
2636 2666
2637 qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2); 2667 qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
2638 faddr = flash_data_to_access_addr(ha->hw_event_ptr); 2668 faddr = flash_data_to_access_addr(ha->hw_event_ptr);
2639 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2669 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2640 if (d[0] == __constant_cpu_to_le32(0xffffffff) && 2670 if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
@@ -2655,12 +2685,12 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2655} 2685}
2656 2686
2657int 2687int
2658qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, 2688qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
2659 uint16_t d2, uint16_t d3) 2689 uint16_t d2, uint16_t d3)
2660{ 2690{
2661#define QMARK(a, b, c, d) \ 2691#define QMARK(a, b, c, d) \
2662 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d)) 2692 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
2663 2693 struct qla_hw_data *ha = vha->hw;
2664 int rval; 2694 int rval;
2665 uint32_t marker[2], fdata[4]; 2695 uint32_t marker[2], fdata[4];
2666 2696
@@ -2681,7 +2711,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2681 /* Locate marker. */ 2711 /* Locate marker. */
2682 ha->hw_event_ptr = ha->flt_region_hw_event; 2712 ha->hw_event_ptr = ha->flt_region_hw_event;
2683 for (;;) { 2713 for (;;) {
2684 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2714 qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
2685 4); 2715 4);
2686 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) && 2716 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
2687 fdata[1] == __constant_cpu_to_le32(0xffffffff)) 2717 fdata[1] == __constant_cpu_to_le32(0xffffffff))
@@ -2700,7 +2730,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2700 } 2730 }
2701 /* No marker, write it. */ 2731 /* No marker, write it. */
2702 if (!ha->flags.hw_event_marker_found) { 2732 if (!ha->flags.hw_event_marker_found) {
2703 rval = qla2xxx_hw_event_store(ha, marker); 2733 rval = qla2xxx_hw_event_store(vha, marker);
2704 if (rval != QLA_SUCCESS) { 2734 if (rval != QLA_SUCCESS) {
2705 DEBUG2(qla_printk(KERN_WARNING, ha, 2735 DEBUG2(qla_printk(KERN_WARNING, ha,
2706 "HW event -- Failed marker write=%x.!\n", 2736 "HW event -- Failed marker write=%x.!\n",
@@ -2714,7 +2744,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2714 /* Store error. */ 2744 /* Store error. */
2715 fdata[0] = cpu_to_le32(code << 16 | d1); 2745 fdata[0] = cpu_to_le32(code << 16 | d1);
2716 fdata[1] = cpu_to_le32(d2 << 16 | d3); 2746 fdata[1] = cpu_to_le32(d2 << 16 | d3);
2717 rval = qla2xxx_hw_event_store(ha, fdata); 2747 rval = qla2xxx_hw_event_store(vha, fdata);
2718 if (rval != QLA_SUCCESS) { 2748 if (rval != QLA_SUCCESS) {
2719 DEBUG2(qla_printk(KERN_WARNING, ha, 2749 DEBUG2(qla_printk(KERN_WARNING, ha,
2720 "HW event -- Failed error write=%x.!\n", 2750 "HW event -- Failed error write=%x.!\n",
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index eea6720adf16..be22f3a09f8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k9" 10#define QLA2XXX_VERSION "8.02.03-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 3
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index de7b3bc2cbc9..1ad51552d6b1 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -23,7 +23,7 @@
23 Functions as standalone, loadable, and PCMCIA driver, the latter from 23 Functions as standalone, loadable, and PCMCIA driver, the latter from
24 Dave Hinds' PCMCIA package. 24 Dave Hinds' PCMCIA package.
25 25
26 Cleaned up 26/10/2002 by Alan Cox <alan@redhat.com> as part of the 2.5 26 Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
27 SCSI driver cleanup and audit. This driver still needs work on the 27 SCSI driver cleanup and audit. This driver still needs work on the
28 following 28 following
29 - Non terminating hardware waits 29 - Non terminating hardware waits
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index edfaf241c5ba..381838ebd460 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -136,7 +136,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
136 else 136 else
137 eh_timed_out = NULL; 137 eh_timed_out = NULL;
138 138
139 if (eh_timed_out) 139 if (eh_timed_out) {
140 rtn = eh_timed_out(scmd); 140 rtn = eh_timed_out(scmd);
141 switch (rtn) { 141 switch (rtn) {
142 case BLK_EH_NOT_HANDLED: 142 case BLK_EH_NOT_HANDLED:
@@ -144,6 +144,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
144 default: 144 default:
145 return rtn; 145 return rtn;
146 } 146 }
147 }
147 148
148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 149 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
149 scmd->result |= DID_TIME_OUT << 16; 150 scmd->result |= DID_TIME_OUT << 16;
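
The scsi_times_out() hunk above adds braces that the indentation always implied: the switch was written as part of the if, but without braces only the assignment was guarded, so the dispatch on rtn ran even when no transport supplied an eh_timed_out handler (benign only as long as rtn happens to start out as BLK_EH_NOT_HANDLED). The two shapes:

	/* Before: only the assignment is conditional. */
	if (eh_timed_out)
		rtn = eh_timed_out(scmd);
	switch (rtn) {			/* runs unconditionally */
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		return rtn;
	}

	/* After: the whole dispatch depends on a handler existing. */
	if (eh_timed_out) {
		rtn = eh_timed_out(scmd);
		switch (rtn) {
		case BLK_EH_NOT_HANDLED:
			break;
		default:
			return rtn;
		}
	}
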
@@ -1405,8 +1406,9 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1405 return ADD_TO_MLQUEUE; 1406 return ADD_TO_MLQUEUE;
1406 case GOOD: 1407 case GOOD:
1407 case COMMAND_TERMINATED: 1408 case COMMAND_TERMINATED:
1408 case TASK_ABORTED:
1409 return SUCCESS; 1409 return SUCCESS;
1410 case TASK_ABORTED:
1411 goto maybe_retry;
1410 case CHECK_CONDITION: 1412 case CHECK_CONDITION:
1411 rtn = scsi_check_sense(scmd); 1413 rtn = scsi_check_sense(scmd);
1412 if (rtn == NEEDS_RETRY) 1414 if (rtn == NEEDS_RETRY)
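
The second scsi_error.c hunk is a policy change: TASK_ABORTED was previously completed as SUCCESS; it now takes the maybe_retry path, so an aborted command is re-driven while retries remain. A condensed sketch of the decision it now reaches (the real code sits under the maybe_retry label in scsi_decide_disposition(), using the retries/allowed counters on struct scsi_cmnd):

	if (++scmd->retries <= scmd->allowed)
		return NEEDS_RETRY;	/* re-issue the aborted command */
	return SUCCESS;			/* retries exhausted: complete */
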
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index dc1cfb2fd76b..2ae4f8fc5831 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -94,7 +94,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); 94 SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
95 95
96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, 96 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
97 &sshdr, timeout, retries); 97 &sshdr, timeout, retries, NULL);
98 98
99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result)); 99 SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
100 100
@@ -270,11 +270,11 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
270EXPORT_SYMBOL(scsi_ioctl); 270EXPORT_SYMBOL(scsi_ioctl);
271 271
272/** 272/**
273 * scsi_nonblock_ioctl() - Handle SG_SCSI_RESET 273 * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
274 * @sdev: scsi device receiving ioctl 274 * @sdev: scsi device receiving ioctl
275 * @cmd: Must be SC_SCSI_RESET 275 * @cmd: Must be SC_SCSI_RESET
276 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST} 276 * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
277 * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag. 277 * @ndelay: file mode O_NDELAY flag
278 */ 278 */
279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, 279int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
280 void __user *arg, int ndelay) 280 void __user *arg, int ndelay)
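
The kernel-doc fix above brings the comment in line with the real signature: callers hand in an O_NDELAY-derived integer rather than a struct file. A hypothetical caller — the filp-to-ndelay translation shown is illustrative, not copied from this patch:

	int ndelay = filp ? (filp->f_flags & O_NDELAY) : 0;
	int error = scsi_nonblockable_ioctl(sdev, cmd, arg, ndelay);
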
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 148d3af92aef..f2f51e0333eb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -183,13 +183,15 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
183 * @timeout: request timeout in seconds 183 * @timeout: request timeout in seconds
184 * @retries: number of times to retry request 184 * @retries: number of times to retry request
185 * @flags: or into request flags; 185 * @flags: or into request flags;
186 * @resid: optional residual length
186 * 187 *
187 * returns the req->errors value which is the scsi_cmnd result 188 * returns the req->errors value which is the scsi_cmnd result
188 * field. 189 * field.
189 */ 190 */
190int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 191int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
191 int data_direction, void *buffer, unsigned bufflen, 192 int data_direction, void *buffer, unsigned bufflen,
192 unsigned char *sense, int timeout, int retries, int flags) 193 unsigned char *sense, int timeout, int retries, int flags,
194 int *resid)
193{ 195{
194 struct request *req; 196 struct request *req;
195 int write = (data_direction == DMA_TO_DEVICE); 197 int write = (data_direction == DMA_TO_DEVICE);
@@ -224,6 +226,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
224 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 226 if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
225 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 227 memset(buffer + (bufflen - req->data_len), 0, req->data_len);
226 228
229 if (resid)
230 *resid = req->data_len;
227 ret = req->errors; 231 ret = req->errors;
228 out: 232 out:
229 blk_put_request(req); 233 blk_put_request(req);
@@ -235,7 +239,8 @@ EXPORT_SYMBOL(scsi_execute);
235 239
236int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, 240int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
237 int data_direction, void *buffer, unsigned bufflen, 241 int data_direction, void *buffer, unsigned bufflen,
238 struct scsi_sense_hdr *sshdr, int timeout, int retries) 242 struct scsi_sense_hdr *sshdr, int timeout, int retries,
243 int *resid)
239{ 244{
240 char *sense = NULL; 245 char *sense = NULL;
241 int result; 246 int result;
@@ -246,7 +251,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
246 return DRIVER_ERROR << 24; 251 return DRIVER_ERROR << 24;
247 } 252 }
248 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 253 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
249 sense, timeout, retries, 0); 254 sense, timeout, retries, 0, resid);
250 if (sshdr) 255 if (sshdr)
251 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 256 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
252 257
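
scsi_execute() and scsi_execute_req() gain a final int *resid out-parameter reporting the residual (untransferred) byte count taken from req->data_len; every pre-existing caller converted in this patch simply passes NULL. A hypothetical caller that wants the residual:

	struct scsi_sense_hdr sshdr;
	int resid = 0;
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 36,
	    &sshdr, 5 * HZ, 3, &resid);
	if (result == 0 && resid == 36)
		;	/* "success" but no data moved -- see the
		 	 * scsi_scan.c workaround later in this patch */
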
@@ -875,16 +880,24 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
875 * (the normal case for most drivers), we don't need 880 * (the normal case for most drivers), we don't need
876 * the logic to deal with cleaning up afterwards. 881 * the logic to deal with cleaning up afterwards.
877 * 882 *
878 * We must do one of several things here: 883 * We must call scsi_end_request(). This will finish off
884 * the specified number of sectors. If we are done, the
885 * command block will be released and the queue function
886 * will be goosed. If we are not done then we have to
887 * figure out what to do next:
879 * 888 *
880 * a) Call scsi_end_request. This will finish off the 889 * a) We can call scsi_requeue_command(). The request
881 * specified number of sectors. If we are done, the 890 * will be unprepared and put back on the queue. Then
882 * command block will be released, and the queue 891 * a new command will be created for it. This should
883 * function will be goosed. If we are not done, then 892 * be used if we made forward progress, or if we want
884 * scsi_end_request will directly goose the queue. 893 * to switch from READ(10) to READ(6) for example.
885 * 894 *
886 * b) We can just use scsi_requeue_command() here. This would 895 * b) We can call scsi_queue_insert(). The request will
887 * be used if we just wanted to retry, for example. 896 * be put back on the queue and retried using the same
897 * command as before, possibly after a delay.
898 *
899 * c) We can call blk_end_request() with -EIO to fail
900 * the remainder of the request.
888 */ 901 */
889void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 902void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
890{ 903{
@@ -896,6 +909,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
896 struct scsi_sense_hdr sshdr; 909 struct scsi_sense_hdr sshdr;
897 int sense_valid = 0; 910 int sense_valid = 0;
898 int sense_deferred = 0; 911 int sense_deferred = 0;
912 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
913 ACTION_DELAYED_RETRY} action;
914 char *description = NULL;
899 915
900 if (result) { 916 if (result) {
901 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 917 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -947,10 +963,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
947 return; 963 return;
948 this_count = blk_rq_bytes(req); 964 this_count = blk_rq_bytes(req);
949 965
950 /* good_bytes = 0, or (inclusive) there were leftovers and 966 if (host_byte(result) == DID_RESET) {
951 * result = 0, so scsi_end_request couldn't retry. 967 /* Third party bus reset or reset for error recovery
952 */ 968 * reasons. Just retry the command and see what
953 if (sense_valid && !sense_deferred) { 969 * happens.
970 */
971 action = ACTION_RETRY;
972 } else if (sense_valid && !sense_deferred) {
954 switch (sshdr.sense_key) { 973 switch (sshdr.sense_key) {
955 case UNIT_ATTENTION: 974 case UNIT_ATTENTION:
956 if (cmd->device->removable) { 975 if (cmd->device->removable) {
@@ -958,16 +977,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
958 * and quietly refuse further access. 977 * and quietly refuse further access.
959 */ 978 */
960 cmd->device->changed = 1; 979 cmd->device->changed = 1;
961 scsi_end_request(cmd, -EIO, this_count, 1); 980 description = "Media Changed";
962 return; 981 action = ACTION_FAIL;
963 } else { 982 } else {
964 /* Must have been a power glitch, or a 983 /* Must have been a power glitch, or a
965 * bus reset. Could not have been a 984 * bus reset. Could not have been a
966 * media change, so we just retry the 985 * media change, so we just retry the
967 * request and see what happens. 986 * command and see what happens.
968 */ 987 */
969 scsi_requeue_command(q, cmd); 988 action = ACTION_RETRY;
970 return;
971 } 989 }
972 break; 990 break;
973 case ILLEGAL_REQUEST: 991 case ILLEGAL_REQUEST:
@@ -983,21 +1001,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
983 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 1001 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
984 (cmd->cmnd[0] == READ_10 || 1002 (cmd->cmnd[0] == READ_10 ||
985 cmd->cmnd[0] == WRITE_10)) { 1003 cmd->cmnd[0] == WRITE_10)) {
1004 /* This will issue a new 6-byte command. */
986 cmd->device->use_10_for_rw = 0; 1005 cmd->device->use_10_for_rw = 0;
987 /* This will cause a retry with a 1006 action = ACTION_REPREP;
988 * 6-byte command. 1007 } else
989 */ 1008 action = ACTION_FAIL;
990 scsi_requeue_command(q, cmd); 1009 break;
991 } else if (sshdr.asc == 0x10) /* DIX */
992 scsi_end_request(cmd, -EIO, this_count, 0);
993 else
994 scsi_end_request(cmd, -EIO, this_count, 1);
995 return;
996 case ABORTED_COMMAND: 1010 case ABORTED_COMMAND:
997 if (sshdr.asc == 0x10) { /* DIF */ 1011 if (sshdr.asc == 0x10) { /* DIF */
998 scsi_end_request(cmd, -EIO, this_count, 0); 1012 action = ACTION_FAIL;
999 return; 1013 description = "Data Integrity Failure";
1000 } 1014 } else
1015 action = ACTION_RETRY;
1001 break; 1016 break;
1002 case NOT_READY: 1017 case NOT_READY:
1003 /* If the device is in the process of becoming 1018 /* If the device is in the process of becoming
@@ -1012,49 +1027,57 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1012 case 0x07: /* operation in progress */ 1027 case 0x07: /* operation in progress */
1013 case 0x08: /* Long write in progress */ 1028 case 0x08: /* Long write in progress */
1014 case 0x09: /* self test in progress */ 1029 case 0x09: /* self test in progress */
1015 scsi_requeue_command(q, cmd); 1030 action = ACTION_DELAYED_RETRY;
1016 return;
1017 default:
1018 break; 1031 break;
1019 } 1032 }
1033 } else {
1034 description = "Device not ready";
1035 action = ACTION_FAIL;
1020 } 1036 }
1021 if (!(req->cmd_flags & REQ_QUIET)) 1037 break;
1022 scsi_cmd_print_sense_hdr(cmd,
1023 "Device not ready",
1024 &sshdr);
1025
1026 scsi_end_request(cmd, -EIO, this_count, 1);
1027 return;
1028 case VOLUME_OVERFLOW: 1038 case VOLUME_OVERFLOW:
1029 if (!(req->cmd_flags & REQ_QUIET)) {
1030 scmd_printk(KERN_INFO, cmd,
1031 "Volume overflow, CDB: ");
1032 __scsi_print_command(cmd->cmnd);
1033 scsi_print_sense("", cmd);
1034 }
1035 /* See SSC3rXX or current. */ 1039 /* See SSC3rXX or current. */
1036 scsi_end_request(cmd, -EIO, this_count, 1); 1040 action = ACTION_FAIL;
1037 return; 1041 break;
1038 default: 1042 default:
1043 description = "Unhandled sense code";
1044 action = ACTION_FAIL;
1039 break; 1045 break;
1040 } 1046 }
1047 } else {
1048 description = "Unhandled error code";
1049 action = ACTION_FAIL;
1041 } 1050 }
1042 if (host_byte(result) == DID_RESET) { 1051
1043 /* Third party bus reset or reset for error recovery 1052 switch (action) {
1044 * reasons. Just retry the request and see what 1053 case ACTION_FAIL:
1045 * happens. 1054 /* Give up and fail the remainder of the request */
1046 */
1047 scsi_requeue_command(q, cmd);
1048 return;
1049 }
1050 if (result) {
1051 if (!(req->cmd_flags & REQ_QUIET)) { 1055 if (!(req->cmd_flags & REQ_QUIET)) {
1056 if (description)
1057 scmd_printk(KERN_INFO, cmd, "%s",
1058 description);
1052 scsi_print_result(cmd); 1059 scsi_print_result(cmd);
1053 if (driver_byte(result) & DRIVER_SENSE) 1060 if (driver_byte(result) & DRIVER_SENSE)
1054 scsi_print_sense("", cmd); 1061 scsi_print_sense("", cmd);
1055 } 1062 }
1063 blk_end_request(req, -EIO, blk_rq_bytes(req));
1064 scsi_next_command(cmd);
1065 break;
1066 case ACTION_REPREP:
1067 /* Unprep the request and put it back at the head of the queue.
1068 * A new command will be prepared and issued.
1069 */
1070 scsi_requeue_command(q, cmd);
1071 break;
1072 case ACTION_RETRY:
1073 /* Retry the same command immediately */
1074 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1075 break;
1076 case ACTION_DELAYED_RETRY:
1077 /* Retry the same command after a delay */
1078 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1079 break;
1056 } 1080 }
1057 scsi_end_request(cmd, -EIO, this_count, !result);
1058} 1081}
1059 1082
1060static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 1083static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
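
The scsi_io_completion() rework above replaces scattered early returns with classify-then-dispatch: the error is first mapped onto one of four actions, and a single switch at the end carries the action out. The resulting shape, condensed (classify_sense() is a hypothetical stand-in for the sense-key switch):

	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;

	if (host_byte(result) == DID_RESET)
		action = ACTION_RETRY;
	else if (sense_valid && !sense_deferred)
		action = classify_sense(&sshdr);	/* hypothetical */
	else
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:		/* fail the rest of the request */
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		scsi_next_command(cmd);
		break;
	case ACTION_REPREP:		/* unprep; a new command is built */
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:		/* same command, immediately */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ACTION_DELAYED_RETRY:	/* same command, after a delay */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	}
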
@@ -1998,7 +2021,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1998 } 2021 }
1999 2022
2000 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2023 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2001 sshdr, timeout, retries); 2024 sshdr, timeout, retries, NULL);
2002 kfree(real_buffer); 2025 kfree(real_buffer);
2003 return ret; 2026 return ret;
2004} 2027}
@@ -2063,7 +2086,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2063 memset(buffer, 0, len); 2086 memset(buffer, 0, len);
2064 2087
2065 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2088 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2066 sshdr, timeout, retries); 2089 sshdr, timeout, retries, NULL);
2067 2090
2068 /* This code looks awful: what it's doing is making sure an 2091 /* This code looks awful: what it's doing is making sure an
2069 * ILLEGAL REQUEST sense return identifies the actual command 2092 * ILLEGAL REQUEST sense return identifies the actual command
@@ -2145,7 +2168,7 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2145 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2168 /* try to eat the UNIT_ATTENTION if there are enough retries */
2146 do { 2169 do {
2147 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2170 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2148 timeout, retries); 2171 timeout, retries, NULL);
2149 if (sdev->removable && scsi_sense_valid(sshdr) && 2172 if (sdev->removable && scsi_sense_valid(sshdr) &&
2150 sshdr->sense_key == UNIT_ATTENTION) 2173 sshdr->sense_key == UNIT_ATTENTION)
2151 sdev->changed = 1; 2174 sdev->changed = 1;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b14dc02c3ded..18486b51668d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -216,7 +216,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
216 scsi_cmd[4] = 0x2a; /* size */ 216 scsi_cmd[4] = 0x2a; /* size */
217 scsi_cmd[5] = 0; 217 scsi_cmd[5] = 0;
218 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL, 218 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
219 SCSI_TIMEOUT, 3); 219 SCSI_TIMEOUT, 3, NULL);
220} 220}
221 221
222/** 222/**
@@ -573,6 +573,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
573 573
574 /* Each pass gets up to three chances to ignore Unit Attention */ 574 /* Each pass gets up to three chances to ignore Unit Attention */
575 for (count = 0; count < 3; ++count) { 575 for (count = 0; count < 3; ++count) {
576 int resid;
577
576 memset(scsi_cmd, 0, 6); 578 memset(scsi_cmd, 0, 6);
577 scsi_cmd[0] = INQUIRY; 579 scsi_cmd[0] = INQUIRY;
578 scsi_cmd[4] = (unsigned char) try_inquiry_len; 580 scsi_cmd[4] = (unsigned char) try_inquiry_len;
@@ -581,7 +583,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
581 583
582 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 584 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
583 inq_result, try_inquiry_len, &sshdr, 585 inq_result, try_inquiry_len, &sshdr,
584 HZ / 2 + HZ * scsi_inq_timeout, 3); 586 HZ / 2 + HZ * scsi_inq_timeout, 3,
587 &resid);
585 588
586 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " 589 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
587 "with code 0x%x\n", 590 "with code 0x%x\n",
@@ -602,6 +605,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
602 (sshdr.ascq == 0)) 605 (sshdr.ascq == 0))
603 continue; 606 continue;
604 } 607 }
608 } else {
609 /*
 610 * If nothing was transferred, try
 611 * again. This works around some
 612 * USB devices.
613 */
614 if (resid == try_inquiry_len)
615 continue;
605 } 616 }
606 break; 617 break;
607 } 618 }
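
The INQUIRY loop above now feeds the new resid out-parameter back into its retry decision; annotated:

	/* A "successful" INQUIRY that transferred zero bytes
	 * (resid == try_inquiry_len) is retried -- a workaround for
	 * USB bridges that ack the command without returning data. */
	if (result == 0 && resid == try_inquiry_len)
		continue;
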
@@ -1390,7 +1401,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1390 1401
1391 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 1402 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1392 lun_data, length, &sshdr, 1403 lun_data, length, &sshdr,
1393 SCSI_TIMEOUT + 4 * HZ, 3); 1404 SCSI_TIMEOUT + 4 * HZ, 3, NULL);
1394 1405
1395 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" 1406 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
1396 " %s (try %d) result 0x%x\n", result 1407 " %s (try %d) result 0x%x\n", result
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1e71abf0607a..062304de4854 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3012,6 +3012,16 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
 
+	/*
+	 * Pre-emptively kill I/O rather than waiting for the work queue
+	 * item to teardown the starget. (FCOE libFC folks prefer this
+	 * and to have the rport_port_id still set when it's done).
+	 */
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	fc_terminate_rport_io(rport);
+
+	BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
+
 	/* remove the identifiers that aren't used in the consisting binding */
 	switch (fc_host->tgtid_bind_type) {
 	case FC_TGTID_BIND_BY_WWPN:
@@ -3035,9 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
 	 * went away and didn't come back - we'll remove
 	 * all attached scsi devices.
 	 */
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	scsi_target_unblock(&rport->dev);
 	fc_queue_work(shost, &rport->stgt_delete_work);
 }
 
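The reordering above is a locking change as much as a lifetime change: the host lock is now dropped as soon as the port state is final, so outstanding I/O can be terminated before the stgt_delete work item runs, and while `rport->port_id` is still populated. Schematically (a paraphrase of the hunk above, not additional kernel code):

	spin_lock_irqsave(shost->host_lock, flags);
	rport->port_state = FC_PORTSTATE_NOTPRESENT;	/* state now final */
	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* call out only after the lock is dropped */
	fc_terminate_rport_io(rport);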
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 7c2d28924d2a..f49f55c6bfc8 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -111,8 +111,9 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
 				      sense, DV_TIMEOUT, /* retries */ 1,
 				      REQ_FAILFAST_DEV |
 				      REQ_FAILFAST_TRANSPORT |
-				      REQ_FAILFAST_DRIVER);
-	if (result & DRIVER_SENSE) {
+				      REQ_FAILFAST_DRIVER,
+				      NULL);
+	if (driver_byte(result) & DRIVER_SENSE) {
 		struct scsi_sense_hdr sshdr_tmp;
 		if (!sshdr)
 			sshdr = &sshdr_tmp;
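The spi_execute() hunk is a real fix, not churn: `DRIVER_SENSE` is a driver-byte flag, so it must be tested against `driver_byte(result)` rather than the raw result word, whose low byte is the SCSI status. The result-word layout, as defined in include/scsi/scsi.h of this era:

	/* result word: status | msg << 8 | host << 16 | driver << 24 */
	/* #define driver_byte(result)	(((result) >> 24) & 0xff)      */

	if (driver_byte(result) & DRIVER_SENSE) {
		/* sense data returned by scsi_execute() is valid */
	}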
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5081b3981d3c..62b28d58e65e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -884,7 +884,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 		 * flush everything.
 		 */
 		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-				       SD_TIMEOUT, SD_MAX_RETRIES);
+				       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 		if (res == 0)
 			break;
 	}
@@ -1134,7 +1134,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 			the_result = scsi_execute_req(sdkp->device, cmd,
 						      DMA_NONE, NULL, 0,
 						      &sshdr, SD_TIMEOUT,
-						      SD_MAX_RETRIES);
+						      SD_MAX_RETRIES, NULL);
 
 			/*
 			 * If the drive has indicated to us that it
@@ -1192,7 +1192,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 				cmd[4] |= 1 << 4;
 				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
 						 NULL, 0, &sshdr,
-						 SD_TIMEOUT, SD_MAX_RETRIES);
+						 SD_TIMEOUT, SD_MAX_RETRIES,
+						 NULL);
 				spintime_expire = jiffies + 100 * HZ;
 				spintime = 1;
 			}
@@ -1306,7 +1307,7 @@ repeat:
 
 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
 					      buffer, longrc ? 13 : 8, &sshdr,
-					      SD_TIMEOUT, SD_MAX_RETRIES);
+					      SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 
 		if (media_not_present(sdkp, &sshdr))
 			return;
@@ -1986,7 +1987,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
 		return -ENODEV;
 
 	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-			       SD_TIMEOUT, SD_MAX_RETRIES);
+			       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 	if (res) {
 		sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
 		sd_print_result(sdkp, res);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 1bcf3c33d7ff..7f0df29f3a64 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -77,7 +77,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 	};
 
 	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
-				NULL, SES_TIMEOUT, SES_RETRIES);
+				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
 }
 
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -95,7 +95,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
 	};
 
 	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
-				  NULL, SES_TIMEOUT, SES_RETRIES);
+				  NULL, SES_TIMEOUT, SES_RETRIES, NULL);
 	if (result)
 		sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
 			    result);
@@ -369,7 +369,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 		return;
 
 	if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
-			     VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
+			     VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES,
+			     NULL))
 		goto free;
 
 	vpd_len = (buf[2] << 8) + buf[3];
@@ -380,7 +381,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 	cmd[3] = vpd_len >> 8;
 	cmd[4] = vpd_len & 0xff;
 	if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
-			     vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
+			     vpd_len, NULL, SES_TIMEOUT, SES_RETRIES, NULL))
 		goto free;
 
 	desc = buf + 4;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 45b66b98a516..e7fa3caead79 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -177,7 +177,7 @@ int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
 	do {
 		the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
 					      0, sshdr, SR_TIMEOUT,
-					      retries--);
+					      retries--, NULL);
 		if (scsi_sense_valid(sshdr) &&
 		    sshdr->sense_key == UNIT_ATTENTION)
 			sdev->changed = 1;
@@ -681,7 +681,7 @@ static void get_sectorsize(struct scsi_cd *cd)
 		/* Do the command and wait.. */
 		the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
 					      buffer, sizeof(buffer), NULL,
-					      SR_TIMEOUT, MAX_RETRIES);
+					      SR_TIMEOUT, MAX_RETRIES, NULL);
 
 		retries--;
 
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ae87d08df588..d92ff512d213 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -207,7 +207,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
 	memset(sense, 0, sizeof(*sense));
 	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
 			      cgc->buffer, cgc->buflen, (char *)sense,
-			      cgc->timeout, IOCTL_RETRIES, 0);
+			      cgc->timeout, IOCTL_RETRIES, 0, NULL);
 
 	scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c959bdc55f4f..7f3f317ee6ca 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -451,9 +451,23 @@ static void st_sleep_done(void *data, char *sense, int result, int resid)
 		complete(SRpnt->waiting);
 }
 
-static struct st_request *st_allocate_request(void)
+static struct st_request *st_allocate_request(struct scsi_tape *stp)
 {
-	return kzalloc(sizeof(struct st_request), GFP_KERNEL);
+	struct st_request *streq;
+
+	streq = kzalloc(sizeof(*streq), GFP_KERNEL);
+	if (streq)
+		streq->stp = stp;
+	else {
+		DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n",
+			    tape_name(stp)););
+		if (signal_pending(current))
+			stp->buffer->syscall_result = -EINTR;
+		else
+			stp->buffer->syscall_result = -EBUSY;
+	}
+
+	return streq;
 }
 
 static void st_release_request(struct st_request *streq)
@@ -481,18 +495,10 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 		return NULL;
 	}
 
-	if (SRpnt == NULL) {
-		SRpnt = st_allocate_request();
-		if (SRpnt == NULL) {
-			DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
-				     tape_name(STp)); );
-			if (signal_pending(current))
-				(STp->buffer)->syscall_result = (-EINTR);
-			else
-				(STp->buffer)->syscall_result = (-EBUSY);
+	if (!SRpnt) {
+		SRpnt = st_allocate_request(STp);
+		if (!SRpnt)
 			return NULL;
-		}
-		SRpnt->stp = STp;
 	}
 
 	/* If async IO, set last_SRpnt. This ptr tells write_behind_check
@@ -527,6 +533,28 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 	return SRpnt;
 }
 
+static int st_scsi_kern_execute(struct st_request *streq,
+				const unsigned char *cmd, int data_direction,
+				void *buffer, unsigned bufflen, int timeout,
+				int retries)
+{
+	struct scsi_tape *stp = streq->stp;
+	int ret, resid;
+
+	stp->buffer->cmdstat.have_sense = 0;
+	memcpy(streq->cmd, cmd, sizeof(streq->cmd));
+
+	ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
+			   streq->sense, timeout, retries, 0, &resid);
+	if (driver_byte(ret) & DRIVER_ERROR)
+		return -EBUSY;
+
+	stp->buffer->cmdstat.midlevel_result = streq->result = ret;
+	stp->buffer->cmdstat.residual = resid;
+	stp->buffer->syscall_result = st_chk_result(stp, streq);
+
+	return 0;
+}
 
 /* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
    write has been correct but EOM early warning reached, -EIO if write ended in
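Every synchronous caller converted below follows the same shape around this helper: allocate a request (which records -EINTR/-EBUSY in syscall_result on failure), run the command, take the st_chk_result() verdict from syscall_result, and release the request. A composite sketch of that pattern (not a verbatim excerpt of any one caller):

	struct st_request *SRpnt;
	int ret;

	SRpnt = st_allocate_request(STp);	/* sets syscall_result on failure */
	if (!SRpnt)
		return STp->buffer->syscall_result;

	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
				   STp->device->request_queue->rq_timeout,
				   MAX_RETRIES);
	if (!ret)
		ret = STp->buffer->syscall_result;	/* st_chk_result() verdict */

	st_release_request(SRpnt);
	return ret;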
@@ -599,6 +627,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
 {
 	struct st_request *SRpnt;
 	unsigned char cmd[MAX_COMMAND_SIZE];
+	int ret;
 
 	cmd[0] = SPACE;
 	cmd[1] = 0x01;		/* Space FileMarks */
@@ -612,19 +641,26 @@ static int cross_eof(struct scsi_tape * STp, int forward)
 	DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
 		   tape_name(STp), forward ? "forward" : "backward"));
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-			   STp->device->timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
 
-	st_release_request(SRpnt);
-	SRpnt = NULL;
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+				   STp->device->request_queue->rq_timeout,
+				   MAX_RETRIES);
+	if (ret)
+		goto out;
+
+	ret = STp->buffer->syscall_result;
 
 	if ((STp->buffer)->cmdstat.midlevel_result != 0)
 		printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
 		   tape_name(STp), forward ? "forward" : "backward");
 
-	return (STp->buffer)->syscall_result;
+out:
+	st_release_request(SRpnt);
+
+	return ret;
 }
 
 
@@ -657,7 +693,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
 	cmd[4] = blks;
 
 	SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
-			   STp->device->timeout, MAX_WRITE_RETRIES, 1);
+			   STp->device->request_queue->rq_timeout,
+			   MAX_WRITE_RETRIES, 1);
 	if (!SRpnt)
 		return (STp->buffer)->syscall_result;
 
@@ -844,21 +881,24 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
 	int attentions, waits, max_wait, scode;
 	int retval = CHKRES_READY, new_session = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
 	struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
 
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return STp->buffer->syscall_result;
+
 	max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
 
 	for (attentions=waits=0; ; ) {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = TEST_UNIT_READY;
-		SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-				   STp->long_timeout, MAX_READY_RETRIES, 1);
 
-		if (!SRpnt) {
-			retval = (STp->buffer)->syscall_result;
+		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+					      STp->long_timeout,
+					      MAX_READY_RETRIES);
+		if (retval)
 			break;
-		}
 
 		if (cmdstatp->have_sense) {
 
@@ -902,8 +942,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
 		break;
 	}
 
-	if (SRpnt != NULL)
-		st_release_request(SRpnt);
+	st_release_request(SRpnt);
+
 	return retval;
 }
 
@@ -980,16 +1020,24 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 		}
 	}
 
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt) {
+		retval = STp->buffer->syscall_result;
+		goto err_out;
+	}
+
 	if (STp->omit_blklims)
 		STp->min_block = STp->max_block = (-1);
 	else {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = READ_BLOCK_LIMITS;
 
-		SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
-				   STp->device->timeout, MAX_READY_RETRIES, 1);
-		if (!SRpnt) {
-			retval = (STp->buffer)->syscall_result;
+		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+					      STp->buffer->b_data, 6,
+					      STp->device->request_queue->rq_timeout,
+					      MAX_READY_RETRIES);
+		if (retval) {
+			st_release_request(SRpnt);
 			goto err_out;
 		}
 
@@ -1013,10 +1061,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 	cmd[0] = MODE_SENSE;
 	cmd[4] = 12;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
-			   STp->device->timeout, MAX_READY_RETRIES, 1);
-	if (!SRpnt) {
-		retval = (STp->buffer)->syscall_result;
+	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+				      STp->buffer->b_data, 12,
+				      STp->device->request_queue->rq_timeout,
+				      MAX_READY_RETRIES);
+	if (retval) {
+		st_release_request(SRpnt);
 		goto err_out;
 	}
 
@@ -1246,10 +1296,17 @@ static int st_flush(struct file *filp, fl_owner_t id)
 	cmd[0] = WRITE_FILEMARKS;
 	cmd[4] = 1 + STp->two_fm;
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-			   STp->device->timeout, MAX_WRITE_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt) {
-		result = (STp->buffer)->syscall_result;
+		result = STp->buffer->syscall_result;
+		goto out;
+	}
+
+	result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+				      STp->device->request_queue->rq_timeout,
+				      MAX_WRITE_RETRIES);
+	if (result) {
+		st_release_request(SRpnt);
 		goto out;
 	}
 
@@ -1634,7 +1691,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 		cmd[4] = blks;
 
 		SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
-				   STp->device->timeout, MAX_WRITE_RETRIES, !async_write);
+				   STp->device->request_queue->rq_timeout,
+				   MAX_WRITE_RETRIES, !async_write);
 		if (!SRpnt) {
 			retval = STbp->syscall_result;
 			goto out;
@@ -1804,7 +1862,8 @@ static long read_tape(struct scsi_tape *STp, long count,
 
 	SRpnt = *aSRpnt;
 	SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
-			   STp->device->timeout, MAX_RETRIES, 1);
+			   STp->device->request_queue->rq_timeout,
+			   MAX_RETRIES, 1);
 	release_buffering(STp, 1);
 	*aSRpnt = SRpnt;
 	if (!SRpnt)
@@ -2213,7 +2272,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
 		DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
 		       (value & ~MT_ST_SET_LONG_TIMEOUT)));
 	} else {
-		STp->device->timeout = value * HZ;
+		blk_queue_rq_timeout(STp->device->request_queue,
+				     value * HZ);
 		DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
 			name, value) );
 	}
@@ -2311,7 +2371,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
 static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
 {
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
+	int ret;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SENSE;
@@ -2320,14 +2381,17 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
 	cmd[2] = page;
 	cmd[4] = 255;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE,
-			   STp->device->timeout, 0, 1);
-	if (SRpnt == NULL)
-		return (STp->buffer)->syscall_result;
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return STp->buffer->syscall_result;
 
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+				   STp->buffer->b_data, cmd[4],
+				   STp->device->request_queue->rq_timeout,
+				   MAX_RETRIES);
 	st_release_request(SRpnt);
 
-	return (STp->buffer)->syscall_result;
+	return ret ? : STp->buffer->syscall_result;
 }
 
 
@@ -2335,9 +2399,9 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
    in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
 static int write_mode_page(struct scsi_tape *STp, int page, int slow)
 {
-	int pgo;
+	int pgo, timeout, ret = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt = NULL;
+	struct st_request *SRpnt;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SELECT;
@@ -2351,14 +2415,21 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
 	(STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
 	(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
 
-	SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE,
-			   (slow ? STp->long_timeout : STp->device->timeout), 0, 1);
-	if (SRpnt == NULL)
-		return (STp->buffer)->syscall_result;
+	SRpnt = st_allocate_request(STp);
+	if (!SRpnt)
+		return ret;
+
+	timeout = slow ? STp->long_timeout :
+		STp->device->request_queue->rq_timeout;
+
+	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
+				   STp->buffer->b_data, cmd[4], timeout, 0);
+	if (!ret)
+		ret = STp->buffer->syscall_result;
 
 	st_release_request(SRpnt);
 
-	return (STp->buffer)->syscall_result;
+	return ret;
 }
 
 
@@ -2464,7 +2535,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 	}
 	if (STp->immediate) {
 		cmd[1] = 1;	/* Don't wait for completion */
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 	}
 	else
 		timeout = STp->long_timeout;
@@ -2476,13 +2547,16 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 		printk(ST_DEB_MSG "%s: Loading tape.\n", name);
 		);
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
-			   timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
+				      MAX_RETRIES);
+	if (retval)
+		goto out;
 
 	retval = (STp->buffer)->syscall_result;
-	st_release_request(SRpnt);
 
 	if (!retval) {	/* SCSI command successful */
 
@@ -2501,6 +2575,8 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
 		STps = &(STp->ps[STp->partition]);
 		STps->drv_file = STps->drv_block = (-1);
 	}
+out:
+	st_release_request(SRpnt);
 
 	return retval;
 }
@@ -2638,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		cmd[2] = (arg >> 16);
 		cmd[3] = (arg >> 8);
 		cmd[4] = arg;
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 		DEBC(
 		     if (cmd_in == MTWEOF)
 			       printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
@@ -2656,7 +2732,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		cmd[0] = REZERO_UNIT;
 		if (STp->immediate) {
 			cmd[1] = 1;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
 		DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
 		fileno = blkno = at_sm = 0;
@@ -2669,7 +2745,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		cmd[0] = START_STOP;
 		if (STp->immediate) {
 			cmd[1] = 1;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
 		cmd[4] = 3;
 		DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
@@ -2702,7 +2778,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		cmd[1] = (arg ? 1 : 0);	/* Long erase with non-zero argument */
 		if (STp->immediate) {
 			cmd[1] |= 2;	/* Don't wait for completion */
-			timeout = STp->device->timeout;
+			timeout = STp->device->request_queue->rq_timeout;
 		}
 		else
 			timeout = STp->long_timeout * 8;
@@ -2754,7 +2830,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		(STp->buffer)->b_data[9] = (ltmp >> 16);
 		(STp->buffer)->b_data[10] = (ltmp >> 8);
 		(STp->buffer)->b_data[11] = ltmp;
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 		DEBC(
 			if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
 				printk(ST_DEB_MSG
@@ -2776,12 +2852,15 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long
 		return (-ENOSYS);
 	}
 
-	SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
-			   timeout, MAX_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
 		return (STp->buffer)->syscall_result;
 
-	ioctl_result = (STp->buffer)->syscall_result;
+	ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
+					    STp->buffer->b_data, datalen,
+					    timeout, MAX_RETRIES);
+	if (!ioctl_result)
+		ioctl_result = (STp->buffer)->syscall_result;
 
 	if (!ioctl_result) {	/* SCSI command successful */
 		st_release_request(SRpnt);
@@ -2943,10 +3022,17 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
 		if (!logical && !STp->scsi2_logical)
 			scmd[1] = 1;
 	}
-	SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
-			   STp->device->timeout, MAX_READY_RETRIES, 1);
+
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
+				      STp->buffer->b_data, 20,
+				      STp->device->request_queue->rq_timeout,
+				      MAX_READY_RETRIES);
+	if (result)
+		goto out;
 
 	if ((STp->buffer)->syscall_result != 0 ||
 	    (STp->device->scsi_level >= SCSI_2 &&
@@ -2974,6 +3060,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
 		DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
 			    *block, *partition));
 	}
+out:
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -3045,13 +3132,17 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
 	}
 	if (STp->immediate) {
 		scmd[1] |= 1;		/* Don't wait for completion */
-		timeout = STp->device->timeout;
+		timeout = STp->device->request_queue->rq_timeout;
 	}
 
-	SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
-			   timeout, MAX_READY_RETRIES, 1);
+	SRpnt = st_allocate_request(STp);
 	if (!SRpnt)
-		return (STp->buffer)->syscall_result;
+		return STp->buffer->syscall_result;
+
+	result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
+				      timeout, MAX_READY_RETRIES);
+	if (result)
+		goto out;
 
 	STps->drv_block = STps->drv_file = (-1);
 	STps->eof = ST_NOEOF;
@@ -3076,7 +3167,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
 		STps->drv_block = STps->drv_file = 0;
 		result = 0;
 	}
-
+out:
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -4029,7 +4120,7 @@ static int st_probe(struct device *dev)
 	tpnt->partition = 0;
 	tpnt->new_partition = 0;
 	tpnt->nbr_partitions = 0;
-	tpnt->device->timeout = ST_TIMEOUT;
+	blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
 	tpnt->long_timeout = ST_LONG_TIMEOUT;
 	tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
 
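The other recurring st.c edit is mechanical: the per-device `timeout` field is gone, and the default command timeout now lives on the request queue. It is read back as `sdev->request_queue->rq_timeout` and set through the block layer, as in:

	/* set the queue-wide default used for new requests */
	blk_queue_rq_timeout(STp->device->request_queue, ST_TIMEOUT);

	/* read it back wherever a per-command timeout is assembled */
	timeout = STp->device->request_queue->rq_timeout;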
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 2fa830c0be27..a3a18ad73125 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1108,8 +1108,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_scsi_host_put;
 	}
 
-	hba->mmio_base = ioremap_nocache(pci_resource_start(pdev, 0),
-		pci_resource_len(pdev, 0));
+	hba->mmio_base = pci_ioremap_bar(pdev, 0);
 	if ( !hba->mmio_base) {
 		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
 			pci_name(pdev));
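pci_ioremap_bar(), new in 2.6.28, replaces the open-coded start/length pair one-for-one and adds a sanity check that the BAR is a memory resource. Roughly what the helper does internally (paraphrased from the drivers/pci/pci.c of this era, slightly simplified):

	void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
	{
		/* only memory BARs may be ioremapped, never I/O-port BARs */
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
			WARN_ON(1);
			return NULL;
		}
		return ioremap_nocache(pci_resource_start(pdev, bar),
				       pci_resource_len(pdev, bar));
	}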
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index f7d279542fa5..e5c369bb568f 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -6,7 +6,7 @@
  *  Changes :
  *
  *  Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking
- *  Alan Cox <alan@redhat.com> : Cleaned up code formatting
+ *  Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting
  *				 Fixed an irq locking bug
  *				 Added ISAPnP support
  *  Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 69ac6e590f1d..9a4273445c0d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2572,9 +2572,10 @@ static struct pci_driver dc390_driver = {
 
 static int __init dc390_module_init(void)
 {
-	if (!disable_clustering)
+	if (!disable_clustering) {
 		printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
 		printk(KERN_INFO "       with \"disable_clustering=1\" and report to maintainers\n");
+	}
 
 	if (tmscsim[0] == -1 || tmscsim[0] > 15) {
 		tmscsim[0] = 7;
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 329eb8780e74..601e95141cbe 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,8 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
 
 static void map_dma(unsigned int i, unsigned int j) {
    unsigned int data_len = 0;
-   unsigned int k, count, pci_dir;
+   unsigned int k, pci_dir;
+   int count;
    struct scatterlist *sg;
    struct mscp *cpp;
    struct scsi_cmnd *SCpnt;
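The u14-34f hunk is a signedness fix: `count` presumably holds, via the earlier data-buffer-accessors conversion, the return value of `scsi_dma_map()`, which is negative when DMA mapping fails, so an `unsigned int` silently defeats the error check. Sketch of the idiom (hypothetical fragment; `SCpnt`, `sg` and `k` as declared in `map_dma()`):

	int count = scsi_dma_map(SCpnt);	/* < 0 on mapping failure */

	BUG_ON(count < 0);	/* can never fire if count is unsigned */
	scsi_for_each_sg(SCpnt, sg, count, k) {
		/* build one adapter scatter/gather descriptor per segment */
	}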
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d4c13561f4a6..093610bcfcce 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -146,13 +146,13 @@
  *
  * use host->host_lock, not io_request_lock, cleanups
  *
- * 2002/10/04 - Alan Cox <alan@redhat.com>
+ * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Use dev_id for interrupts, kill __func__ pasting
  * Add a lock for the scb pool, clean up all other cli/sti usage stuff
  * Use the adapter lock for the other places we had the cli's
  *
- * 2002/10/06 - Alan Cox <alan@redhat.com>
+ * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
  *
  * Switch to new style error handling
  * Clean up delay to udelay, and yielding sleeps