author     David S. Miller <davem@davemloft.net>  2009-06-15 06:02:23 -0400
committer  David S. Miller <davem@davemloft.net>  2009-06-15 06:02:23 -0400
commit     9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree       8d104ec2a459346b99413b0b77421ca7b9936c1a /drivers/scsi
parent     ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent     45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 3
-rw-r--r--  drivers/scsi/3w-xxxx.c | 5
-rw-r--r--  drivers/scsi/3w-xxxx.h | 2
-rw-r--r--  drivers/scsi/Kconfig | 42
-rw-r--r--  drivers/scsi/Makefile | 4
-rw-r--r--  drivers/scsi/NCR_D700.c | 2
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_constants.h | 155
-rw-r--r--  drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 1509
-rw-r--r--  drivers/scsi/bnx2i/Kconfig | 7
-rw-r--r--  drivers/scsi/bnx2i/Makefile | 3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 771
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2405
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 438
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2064
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_sysfs.c | 142
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i.h | 1
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 26
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c | 23
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.h | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 6
-rw-r--r--  drivers/scsi/dpt/osd_util.h | 2
-rw-r--r--  drivers/scsi/eata.c | 24
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 95
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 1
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c | 21
-rw-r--r--  drivers/scsi/fnic/Makefile | 15
-rw-r--r--  drivers/scsi/fnic/cq_desc.h | 78
-rw-r--r--  drivers/scsi/fnic/cq_enet_desc.h | 167
-rw-r--r--  drivers/scsi/fnic/cq_exch_desc.h | 182
-rw-r--r--  drivers/scsi/fnic/fcpio.h | 780
-rw-r--r--  drivers/scsi/fnic/fnic.h | 265
-rw-r--r--  drivers/scsi/fnic/fnic_attrs.c | 56
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 742
-rw-r--r--  drivers/scsi/fnic/fnic_io.h | 67
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c | 332
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 943
-rw-r--r--  drivers/scsi/fnic/fnic_res.c | 444
-rw-r--r--  drivers/scsi/fnic/fnic_res.h | 197
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 1850
-rw-r--r--  drivers/scsi/fnic/rq_enet_desc.h | 58
-rw-r--r--  drivers/scsi/fnic/vnic_cq.c | 85
-rw-r--r--  drivers/scsi/fnic/vnic_cq.h | 121
-rw-r--r--  drivers/scsi/fnic/vnic_cq_copy.h | 62
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 690
-rw-r--r--  drivers/scsi/fnic/vnic_dev.h | 161
-rw-r--r--  drivers/scsi/fnic/vnic_devcmd.h | 281
-rw-r--r--  drivers/scsi/fnic/vnic_intr.c | 60
-rw-r--r--  drivers/scsi/fnic/vnic_intr.h | 118
-rw-r--r--  drivers/scsi/fnic/vnic_nic.h | 69
-rw-r--r--  drivers/scsi/fnic/vnic_resource.h | 61
-rw-r--r--  drivers/scsi/fnic/vnic_rq.c | 196
-rw-r--r--  drivers/scsi/fnic/vnic_rq.h | 235
-rw-r--r--  drivers/scsi/fnic/vnic_scsi.h | 99
-rw-r--r--  drivers/scsi/fnic/vnic_stats.h | 68
-rw-r--r--  drivers/scsi/fnic/vnic_wq.c | 182
-rw-r--r--  drivers/scsi/fnic/vnic_wq.h | 175
-rw-r--r--  drivers/scsi/fnic/vnic_wq_copy.c | 117
-rw-r--r--  drivers/scsi/fnic/vnic_wq_copy.h | 128
-rw-r--r--  drivers/scsi/fnic/wq_enet_desc.h | 96
-rw-r--r--  drivers/scsi/gdth_proc.c | 5
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 434
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 40
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 463
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 4
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h | 68
-rw-r--r--  drivers/scsi/ipr.c | 5
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 468
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 18
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 123
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 250
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 63
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 275
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 1365
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 142
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 2141
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 5626
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 54
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 674
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 206
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 51
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 956
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 6683
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 467
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 62
-rw-r--r--  drivers/scsi/megaraid.h | 2
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 7
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 32
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 363
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 59
-rw-r--r--  drivers/scsi/mvsas.c | 3222
-rw-r--r--  drivers/scsi/mvsas/Kconfig | 42
-rw-r--r--  drivers/scsi/mvsas/Makefile | 32
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.c | 793
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.h | 151
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 672
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.h | 222
-rw-r--r--  drivers/scsi/mvsas/mv_chips.h | 280
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h | 502
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 703
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 2154
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 406
-rw-r--r--  drivers/scsi/osd/Kbuild | 25
-rwxr-xr-x  drivers/scsi/osd/Makefile | 37
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 155
-rw-r--r--  drivers/scsi/osd/osd_uld.c | 66
-rw-r--r--  drivers/scsi/qla1280.c | 387
-rw-r--r--  drivers/scsi/qla1280.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 227
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 45
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 43
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 206
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 55
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 240
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 244
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 118
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 294
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 47
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 21
-rw-r--r--  drivers/scsi/scsi_lib.c | 101
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 175
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 71
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/scsi/sg.c | 18
-rw-r--r--  drivers/scsi/sr.c | 17
-rw-r--r--  drivers/scsi/st.c | 8
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 66
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 49
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.h | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 22
150 files changed, 43408 insertions, 6747 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983aba8f..36c21b19e5d 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End twa_unmap_scsi_data() */
 
 /* scsi_host_template initializer */
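Both 3ware hunks in this merge apply the same guard: scsi_dma_unmap() is only called for commands whose scatterlist was actually mapped earlier. A minimal sketch of that pattern, assuming TW_PHASE_SGLIST is the driver's "buffers are mapped" marker from its own header (the helper name below is illustrative, not part of the patch):

#include <scsi/scsi_cmnd.h>	/* struct scsi_cmnd, scsi_dma_unmap() */

/* Sketch only: skip the unmap for commands whose data was never DMA-mapped
 * (SCp.phase is set to TW_PHASE_SGLIST when the driver maps the buffers). */
static void tw_unmap_if_mapped(struct scsi_cmnd *cmd)
{
	if (cmd->SCp.phase == TW_PHASE_SGLIST)
		scsi_dma_unmap(cmd);
}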
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2c9e2..faa0fcfed71 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
    Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007 3ware Inc.
+   Copyright (C) 1999-2009 3ware Inc.
 
    Kernel compatiblity By: Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
 {
 	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End tw_unmap_scsi_data() */
 
 /* This function will reset a device extension */
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e122b..a5a2ba2561d 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
    Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007 3ware Inc.
+   Copyright (C) 1999-2009 3ware Inc.
 
    Kernel compatiblity By: Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826..6a19ed9a119 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device. Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in order
+	  to allow access to several media, e.g. CD jukebox, USB card reader,
+	  mobile phone in mass storage mode. This option forces the kernel to
+	  probe for all LUNs by default. This setting can be overriden by
+	  max_luns boot/module parameter. Note that this option does not affect
+	  devices conforming to SCSI-3 or higher as they can explicitely report
+	  their number of LUNs. It is safe to say Y here unless you have one of
+	  those rare devices which reacts in an unexpected way when probed for
+	  multiple LUNs.
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
 	  http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -628,6 +629,17 @@ config FCOE
 	---help---
 	  Fibre Channel over Ethernet module
 
+config FCOE_FNIC
+	tristate "Cisco FNIC Driver"
+	depends on PCI && X86
+	select LIBFC
+	help
+	  This is support for the Cisco PCI-Express FCoE HBA.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/scsi/scsi.txt>.
+	  The module will be called fnic.
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
@@ -1039,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compiler this driver as a module, choose M here: the module
-	  will be called mvsas.
-
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI
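For context on the SCSI_MULTI_LUN help text rewritten above: the Kconfig choice only sets the default, and the SCSI core's max_luns parameter can still override it at runtime. Illustrative examples (not part of the patch):

    scsi_mod.max_luns=1             # kernel command line, SCSI core built in
    modprobe scsi_mod max_luns=1    # SCSI core built as a module

With max_luns=1 only LUN 0 is probed regardless of the compiled-in default.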
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861ac417..25429ea63d0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
 obj-$(CONFIG_LIBFC)		+= libfc/
 obj-$(CONFIG_LIBFCOE)		+= fcoe/
 obj-$(CONFIG_FCOE)		+= fcoe/
+obj-$(CONFIG_FCOE_FNIC)	+= fnic/
 obj-$(CONFIG_ISCSI_TCP)	+= libiscsi.o	libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o	a4000t.o
@@ -125,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)	+= stex.o
-obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d845868..1cdf09a4779 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	return ret;
 }
 
-static int
+static irqreturn_t
 NCR_D700_intr(int irq, void *data)
 {
 	struct NCR_D700_private *p = (struct NCR_D700_private *)data;
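The NCR_D700 hunk above is part of the long-running conversion of interrupt handlers to the irqreturn_t prototype, under which a handler reports whether its device actually raised the interrupt (important on shared IRQ lines). A minimal sketch of the resulting shape; the handler name and body here are illustrative only, not taken from NCR_D700.c:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED, IRQ_NONE */

/* Sketch only: an irqreturn_t-style handler for a (possibly shared) IRQ. */
static irqreturn_t example_intr(int irq, void *data)
{
	/* ... read the controller's status registers ... */
	return IRQ_HANDLED;	/* or IRQ_NONE when the interrupt was not ours */
}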
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 00000000000..2fceb19eb27
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_
13
14/**
15* This file defines HSI constants for the iSCSI flows
16*/
17
18/* iSCSI request op codes */
19#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
20
21/* iSCSI response/messages op codes */
22#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
23#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
24
25/* iSCSI task types */
26#define ISCSI_TASK_TYPE_READ (0)
27#define ISCSI_TASK_TYPE_WRITE (1)
28#define ISCSI_TASK_TYPE_MPATH (2)
29
30/* initial CQ sequence numbers */
31#define ISCSI_INITIAL_SN (1)
32
33/* KWQ (kernel work queue) layer codes */
34#define ISCSI_KWQE_LAYER_CODE (6)
35
36/* KWQ (kernel work queue) request op codes */
37#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
38#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
39#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
40#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
41#define ISCSI_KWQE_OPCODE_INIT1 (4)
42#define ISCSI_KWQE_OPCODE_INIT2 (5)
43
44/* KCQ (kernel completion queue) response op codes */
45#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
46#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
47#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
48#define ISCSI_KCQE_OPCODE_INIT (0x14)
49#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
50#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
51#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
52#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
53#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
54#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
55#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
56
57/* KCQ (kernel completion queue) completion status */
58#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
59#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
60#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
61#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
62#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
63
64#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
65#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
66
67#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
68#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
69#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
70#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
71#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
72
73/* Response */
74#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
75#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
76#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
77#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
78#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
79#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
80#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
81#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
82#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
83#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
84#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
85
86/* Data-In */
87#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
88#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
89#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
90#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
91#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
92#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
93
94/* R2T */
95#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
96#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
97#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
98#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
99#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
100#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
101#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
102#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
103#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
104
105/* TMF */
106#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
107#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
108#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
109#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
110
111/* IP/TCP processing errors: */
112#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
113#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
114#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
115#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
116
117/* iSCSI licensing errors */
118/* general iSCSI license not installed */
119#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122
123/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16)
126#define ISCSI_CQ_DB_SIZE (80)
127
128#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
129
130/* Page size codes (for flags field in connection offload request) */
131#define ISCSI_PAGE_SIZE_256 (0)
132#define ISCSI_PAGE_SIZE_512 (1)
133#define ISCSI_PAGE_SIZE_1K (2)
134#define ISCSI_PAGE_SIZE_2K (3)
135#define ISCSI_PAGE_SIZE_4K (4)
136#define ISCSI_PAGE_SIZE_8K (5)
137#define ISCSI_PAGE_SIZE_16K (6)
138#define ISCSI_PAGE_SIZE_32K (7)
139#define ISCSI_PAGE_SIZE_64K (8)
140#define ISCSI_PAGE_SIZE_128K (9)
141#define ISCSI_PAGE_SIZE_256K (10)
142#define ISCSI_PAGE_SIZE_512K (11)
143#define ISCSI_PAGE_SIZE_1M (12)
144#define ISCSI_PAGE_SIZE_2M (13)
145#define ISCSI_PAGE_SIZE_4M (14)
146#define ISCSI_PAGE_SIZE_8M (15)
147
148/* Iscsi PDU related defines */
149#define ISCSI_HEADER_SIZE (48)
150#define ISCSI_DIGEST_SHIFT (2)
151#define ISCSI_DIGEST_SIZE (4)
152
153#define B577XX_ISCSI_CONNECTION_TYPE 3
154
155#endif /*__57XX_ISCSI_CONSTANTS_H_ */
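The ISCSI_PAGE_SIZE_* codes defined in the header above encode a power-of-two page size for the connection offload request, with 256 bytes as code 0 and each doubling adding one. A hypothetical helper (not part of the patch) illustrating that encoding:

/* Sketch only: map a size in bytes to the ISCSI_PAGE_SIZE_* code above. */
static int iscsi_page_size_to_code(unsigned long bytes)
{
	unsigned long sz = 256;
	int code = ISCSI_PAGE_SIZE_256;

	while (sz < bytes && code < ISCSI_PAGE_SIZE_8M) {
		sz <<= 1;
		code++;
	}
	return code;	/* e.g. 4096 bytes -> ISCSI_PAGE_SIZE_4K (4) */
}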
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 00000000000..36af1afef9b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__
13
14/*
15 * iSCSI Async CQE
16 */
17struct bnx2i_async_msg {
18#if defined(__BIG_ENDIAN)
19 u8 op_code;
20 u8 reserved1;
21 u16 reserved0;
22#elif defined(__LITTLE_ENDIAN)
23 u16 reserved0;
24 u8 reserved1;
25 u8 op_code;
26#endif
27 u32 reserved2;
28 u32 exp_cmd_sn;
29 u32 max_cmd_sn;
30 u32 reserved3[2];
31#if defined(__BIG_ENDIAN)
32 u16 reserved5;
33 u8 err_code;
34 u8 reserved4;
35#elif defined(__LITTLE_ENDIAN)
36 u8 reserved4;
37 u8 err_code;
38 u16 reserved5;
39#endif
40 u32 reserved6;
41 u32 lun[2];
42#if defined(__BIG_ENDIAN)
43 u8 async_event;
44 u8 async_vcode;
45 u16 param1;
46#elif defined(__LITTLE_ENDIAN)
47 u16 param1;
48 u8 async_vcode;
49 u8 async_event;
50#endif
51#if defined(__BIG_ENDIAN)
52 u16 param2;
53 u16 param3;
54#elif defined(__LITTLE_ENDIAN)
55 u16 param3;
56 u16 param2;
57#endif
58 u32 reserved7[3];
59 u32 cq_req_sn;
60};
61
62
63/*
64 * iSCSI Buffer Descriptor (BD)
65 */
66struct iscsi_bd {
67 u32 buffer_addr_hi;
68 u32 buffer_addr_lo;
69#if defined(__BIG_ENDIAN)
70 u16 reserved0;
71 u16 buffer_length;
72#elif defined(__LITTLE_ENDIAN)
73 u16 buffer_length;
74 u16 reserved0;
75#endif
76#if defined(__BIG_ENDIAN)
77 u16 reserved3;
78 u16 flags;
79#define ISCSI_BD_RESERVED1 (0x3F<<0)
80#define ISCSI_BD_RESERVED1_SHIFT 0
81#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
82#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
83#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
84#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
85#define ISCSI_BD_RESERVED2 (0xFF<<8)
86#define ISCSI_BD_RESERVED2_SHIFT 8
87#elif defined(__LITTLE_ENDIAN)
88 u16 flags;
89#define ISCSI_BD_RESERVED1 (0x3F<<0)
90#define ISCSI_BD_RESERVED1_SHIFT 0
91#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
92#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
93#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
94#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
95#define ISCSI_BD_RESERVED2 (0xFF<<8)
96#define ISCSI_BD_RESERVED2_SHIFT 8
97 u16 reserved3;
98#endif
99};
100
101
102/*
103 * iSCSI Cleanup SQ WQE
104 */
105struct bnx2i_cleanup_request {
106#if defined(__BIG_ENDIAN)
107 u8 op_code;
108 u8 reserved1;
109 u16 reserved0;
110#elif defined(__LITTLE_ENDIAN)
111 u16 reserved0;
112 u8 reserved1;
113 u8 op_code;
114#endif
115 u32 reserved2[3];
116#if defined(__BIG_ENDIAN)
117 u16 reserved3;
118 u16 itt;
119#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
120#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
121#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
122#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
123#elif defined(__LITTLE_ENDIAN)
124 u16 itt;
125#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
126#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
127#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
128#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
129 u16 reserved3;
130#endif
131 u32 reserved4[10];
132#if defined(__BIG_ENDIAN)
133 u8 cq_index;
134 u8 reserved6;
135 u16 reserved5;
136#elif defined(__LITTLE_ENDIAN)
137 u16 reserved5;
138 u8 reserved6;
139 u8 cq_index;
140#endif
141};
142
143
144/*
145 * iSCSI Cleanup CQE
146 */
147struct bnx2i_cleanup_response {
148#if defined(__BIG_ENDIAN)
149 u8 op_code;
150 u8 status;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 status;
155 u8 op_code;
156#endif
157 u32 reserved1[3];
158 u32 reserved2[2];
159#if defined(__BIG_ENDIAN)
160 u16 reserved4;
161 u8 err_code;
162 u8 reserved3;
163#elif defined(__LITTLE_ENDIAN)
164 u8 reserved3;
165 u8 err_code;
166 u16 reserved4;
167#endif
168 u32 reserved5[7];
169#if defined(__BIG_ENDIAN)
170 u16 reserved6;
171 u16 itt;
172#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
173#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
174#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
175#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
176#elif defined(__LITTLE_ENDIAN)
177 u16 itt;
178#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
179#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
180#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
181#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
182 u16 reserved6;
183#endif
184 u32 cq_req_sn;
185};
186
187
188/*
189 * SCSI read/write SQ WQE
190 */
191struct bnx2i_cmd_request {
192#if defined(__BIG_ENDIAN)
193 u8 op_code;
194 u8 op_attr;
195#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
196#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
197#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
198#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
199#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
200#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
201#define ISCSI_CMD_REQUEST_READ (0x1<<6)
202#define ISCSI_CMD_REQUEST_READ_SHIFT 6
203#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
204#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
205 u16 reserved0;
206#elif defined(__LITTLE_ENDIAN)
207 u16 reserved0;
208 u8 op_attr;
209#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
210#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
211#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
212#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
213#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
214#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
215#define ISCSI_CMD_REQUEST_READ (0x1<<6)
216#define ISCSI_CMD_REQUEST_READ_SHIFT 6
217#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
218#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
219 u8 op_code;
220#endif
221#if defined(__BIG_ENDIAN)
222 u16 ud_buffer_offset;
223 u16 sd_buffer_offset;
224#elif defined(__LITTLE_ENDIAN)
225 u16 sd_buffer_offset;
226 u16 ud_buffer_offset;
227#endif
228 u32 lun[2];
229#if defined(__BIG_ENDIAN)
230 u16 reserved2;
231 u16 itt;
232#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
233#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
234#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
235#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
236#elif defined(__LITTLE_ENDIAN)
237 u16 itt;
238#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
239#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
240#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
241#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
242 u16 reserved2;
243#endif
244 u32 total_data_transfer_length;
245 u32 cmd_sn;
246 u32 reserved3;
247 u32 cdb[4];
248 u32 zero_fill;
249 u32 bd_list_addr_lo;
250 u32 bd_list_addr_hi;
251#if defined(__BIG_ENDIAN)
252 u8 cq_index;
253 u8 sd_start_bd_index;
254 u8 ud_start_bd_index;
255 u8 num_bds;
256#elif defined(__LITTLE_ENDIAN)
257 u8 num_bds;
258 u8 ud_start_bd_index;
259 u8 sd_start_bd_index;
260 u8 cq_index;
261#endif
262};
263
264
265/*
266 * task statistics for write response
267 */
268struct bnx2i_write_resp_task_stat {
269 u32 num_data_ins;
270};
271
272/*
273 * task statistics for read response
274 */
275struct bnx2i_read_resp_task_stat {
276#if defined(__BIG_ENDIAN)
277 u16 num_data_outs;
278 u16 num_r2ts;
279#elif defined(__LITTLE_ENDIAN)
280 u16 num_r2ts;
281 u16 num_data_outs;
282#endif
283};
284
285/*
286 * task statistics for iSCSI cmd response
287 */
288union bnx2i_cmd_resp_task_stat {
289 struct bnx2i_write_resp_task_stat write_stat;
290 struct bnx2i_read_resp_task_stat read_stat;
291};
292
293/*
294 * SCSI Command CQE
295 */
296struct bnx2i_cmd_response {
297#if defined(__BIG_ENDIAN)
298 u8 op_code;
299 u8 response_flags;
300#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
301#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
302#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
303#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
304#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
305#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
306#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
307#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
308#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
309#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
310#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
311#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
312 u8 response;
313 u8 status;
314#elif defined(__LITTLE_ENDIAN)
315 u8 status;
316 u8 response;
317 u8 response_flags;
318#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
319#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
320#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
321#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
322#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
323#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
324#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
325#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
326#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
327#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
328#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
329#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
330 u8 op_code;
331#endif
332 u32 data_length;
333 u32 exp_cmd_sn;
334 u32 max_cmd_sn;
335 u32 reserved2;
336 u32 residual_count;
337#if defined(__BIG_ENDIAN)
338 u16 reserved4;
339 u8 err_code;
340 u8 reserved3;
341#elif defined(__LITTLE_ENDIAN)
342 u8 reserved3;
343 u8 err_code;
344 u16 reserved4;
345#endif
346 u32 reserved5[5];
347 union bnx2i_cmd_resp_task_stat task_stat;
348 u32 reserved6;
349#if defined(__BIG_ENDIAN)
350 u16 reserved7;
351 u16 itt;
352#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
353#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
354#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
355#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
356#elif defined(__LITTLE_ENDIAN)
357 u16 itt;
358#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
359#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
360#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
361#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
362 u16 reserved7;
363#endif
364 u32 cq_req_sn;
365};
366
367
368
369/*
370 * firmware middle-path request SQ WQE
371 */
372struct bnx2i_fw_mp_request {
373#if defined(__BIG_ENDIAN)
374 u8 op_code;
375 u8 op_attr;
376 u16 hdr_opaque1;
377#elif defined(__LITTLE_ENDIAN)
378 u16 hdr_opaque1;
379 u8 op_attr;
380 u8 op_code;
381#endif
382 u32 data_length;
383 u32 hdr_opaque2[2];
384#if defined(__BIG_ENDIAN)
385 u16 reserved0;
386 u16 itt;
387#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
388#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
389#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
390#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
391#elif defined(__LITTLE_ENDIAN)
392 u16 itt;
393#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
394#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
395#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
396#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
397 u16 reserved0;
398#endif
399 u32 hdr_opaque3[4];
400 u32 resp_bd_list_addr_lo;
401 u32 resp_bd_list_addr_hi;
402 u32 resp_buffer;
403#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
404#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
405#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
406#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
407#if defined(__BIG_ENDIAN)
408 u16 reserved4;
409 u8 reserved3;
410 u8 flags;
411#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
412#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
413#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
414#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
415#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
416#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
417#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
418#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
419#elif defined(__LITTLE_ENDIAN)
420 u8 flags;
421#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
422#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
423#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
424#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
425#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
426#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
427#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
428#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
429 u8 reserved3;
430 u16 reserved4;
431#endif
432 u32 bd_list_addr_lo;
433 u32 bd_list_addr_hi;
434#if defined(__BIG_ENDIAN)
435 u8 cq_index;
436 u8 reserved6;
437 u8 reserved5;
438 u8 num_bds;
439#elif defined(__LITTLE_ENDIAN)
440 u8 num_bds;
441 u8 reserved5;
442 u8 reserved6;
443 u8 cq_index;
444#endif
445};
446
447
448/*
449 * firmware response - CQE: used only by firmware
450 */
451struct bnx2i_fw_response {
452 u32 hdr_dword1[2];
453 u32 hdr_exp_cmd_sn;
454 u32 hdr_max_cmd_sn;
455 u32 hdr_ttt;
456 u32 hdr_res_cnt;
457 u32 cqe_flags;
458#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
459#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
460#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
461#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
462#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
463#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
464 u32 stat_sn;
465 u32 hdr_dword2[2];
466 u32 hdr_dword3[2];
467 u32 task_stat;
468 u32 reserved0;
469 u32 hdr_itt;
470 u32 cq_req_sn;
471};
472
473
474/*
475 * iSCSI KCQ CQE parameters
476 */
477union iscsi_kcqe_params {
478 u32 reserved0[4];
479};
480
481/*
482 * iSCSI KCQ CQE
483 */
484struct iscsi_kcqe {
485 u32 iscsi_conn_id;
486 u32 completion_status;
487 u32 iscsi_conn_context_id;
488 union iscsi_kcqe_params params;
489#if defined(__BIG_ENDIAN)
490 u8 flags;
491#define ISCSI_KCQE_RESERVED0 (0xF<<0)
492#define ISCSI_KCQE_RESERVED0_SHIFT 0
493#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
494#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
495#define ISCSI_KCQE_RESERVED1 (0x1<<7)
496#define ISCSI_KCQE_RESERVED1_SHIFT 7
497 u8 op_code;
498 u16 qe_self_seq;
499#elif defined(__LITTLE_ENDIAN)
500 u16 qe_self_seq;
501 u8 op_code;
502 u8 flags;
503#define ISCSI_KCQE_RESERVED0 (0xF<<0)
504#define ISCSI_KCQE_RESERVED0_SHIFT 0
505#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
506#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
507#define ISCSI_KCQE_RESERVED1 (0x1<<7)
508#define ISCSI_KCQE_RESERVED1_SHIFT 7
509#endif
510};
511
512
513
514/*
515 * iSCSI KWQE header
516 */
517struct iscsi_kwqe_header {
518#if defined(__BIG_ENDIAN)
519 u8 flags;
520#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
521#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
522#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
523#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
524#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
525#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
526 u8 op_code;
527#elif defined(__LITTLE_ENDIAN)
528 u8 op_code;
529 u8 flags;
530#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
531#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
532#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
533#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
534#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
535#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
536#endif
537};
538
539/*
540 * iSCSI firmware init request 1
541 */
542struct iscsi_kwqe_init1 {
543#if defined(__BIG_ENDIAN)
544 struct iscsi_kwqe_header hdr;
545 u8 reserved0;
546 u8 num_cqs;
547#elif defined(__LITTLE_ENDIAN)
548 u8 num_cqs;
549 u8 reserved0;
550 struct iscsi_kwqe_header hdr;
551#endif
552 u32 dummy_buffer_addr_lo;
553 u32 dummy_buffer_addr_hi;
554#if defined(__BIG_ENDIAN)
555 u16 num_ccells_per_conn;
556 u16 num_tasks_per_conn;
557#elif defined(__LITTLE_ENDIAN)
558 u16 num_tasks_per_conn;
559 u16 num_ccells_per_conn;
560#endif
561#if defined(__BIG_ENDIAN)
562 u16 sq_wqes_per_page;
563 u16 sq_num_wqes;
564#elif defined(__LITTLE_ENDIAN)
565 u16 sq_num_wqes;
566 u16 sq_wqes_per_page;
567#endif
568#if defined(__BIG_ENDIAN)
569 u8 cq_log_wqes_per_page;
570 u8 flags;
571#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
572#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
573#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
574#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
575#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
576#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
577#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
578#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
579 u16 cq_num_wqes;
580#elif defined(__LITTLE_ENDIAN)
581 u16 cq_num_wqes;
582 u8 flags;
583#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
584#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
585#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
586#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
587#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
588#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
589#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
590#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
591 u8 cq_log_wqes_per_page;
592#endif
593#if defined(__BIG_ENDIAN)
594 u16 cq_num_pages;
595 u16 sq_num_pages;
596#elif defined(__LITTLE_ENDIAN)
597 u16 sq_num_pages;
598 u16 cq_num_pages;
599#endif
600#if defined(__BIG_ENDIAN)
601 u16 rq_buffer_size;
602 u16 rq_num_wqes;
603#elif defined(__LITTLE_ENDIAN)
604 u16 rq_num_wqes;
605 u16 rq_buffer_size;
606#endif
607};
608
609/*
610 * iSCSI firmware init request 2
611 */
612struct iscsi_kwqe_init2 {
613#if defined(__BIG_ENDIAN)
614 struct iscsi_kwqe_header hdr;
615 u16 max_cq_sqn;
616#elif defined(__LITTLE_ENDIAN)
617 u16 max_cq_sqn;
618 struct iscsi_kwqe_header hdr;
619#endif
620 u32 error_bit_map[2];
621 u32 reserved1[5];
622};
623
624/*
625 * Initial iSCSI connection offload request 1
626 */
627struct iscsi_kwqe_conn_offload1 {
628#if defined(__BIG_ENDIAN)
629 struct iscsi_kwqe_header hdr;
630 u16 iscsi_conn_id;
631#elif defined(__LITTLE_ENDIAN)
632 u16 iscsi_conn_id;
633 struct iscsi_kwqe_header hdr;
634#endif
635 u32 sq_page_table_addr_lo;
636 u32 sq_page_table_addr_hi;
637 u32 cq_page_table_addr_lo;
638 u32 cq_page_table_addr_hi;
639 u32 reserved0[3];
640};
641
642/*
643 * iSCSI Page Table Entry (PTE)
644 */
645struct iscsi_pte {
646 u32 hi;
647 u32 lo;
648};
649
650/*
651 * Initial iSCSI connection offload request 2
652 */
653struct iscsi_kwqe_conn_offload2 {
654#if defined(__BIG_ENDIAN)
655 struct iscsi_kwqe_header hdr;
656 u16 reserved0;
657#elif defined(__LITTLE_ENDIAN)
658 u16 reserved0;
659 struct iscsi_kwqe_header hdr;
660#endif
661 u32 rq_page_table_addr_lo;
662 u32 rq_page_table_addr_hi;
663 struct iscsi_pte sq_first_pte;
664 struct iscsi_pte cq_first_pte;
665 u32 num_additional_wqes;
666};
667
668
669/*
670 * Initial iSCSI connection offload request 3
671 */
672struct iscsi_kwqe_conn_offload3 {
673#if defined(__BIG_ENDIAN)
674 struct iscsi_kwqe_header hdr;
675 u16 reserved0;
676#elif defined(__LITTLE_ENDIAN)
677 u16 reserved0;
678 struct iscsi_kwqe_header hdr;
679#endif
680 u32 reserved1;
681 struct iscsi_pte qp_first_pte[3];
682};
683
684
685/*
686 * iSCSI connection update request
687 */
688struct iscsi_kwqe_conn_update {
689#if defined(__BIG_ENDIAN)
690 struct iscsi_kwqe_header hdr;
691 u16 reserved0;
692#elif defined(__LITTLE_ENDIAN)
693 u16 reserved0;
694 struct iscsi_kwqe_header hdr;
695#endif
696#if defined(__BIG_ENDIAN)
697 u8 session_error_recovery_level;
698 u8 max_outstanding_r2ts;
699 u8 reserved2;
700 u8 conn_flags;
701#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
702#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
703#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
704#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
705#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
706#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
707#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
708#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
709#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
710#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
711#elif defined(__LITTLE_ENDIAN)
712 u8 conn_flags;
713#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
714#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
715#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
716#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
717#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
718#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
719#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
720#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
721#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
722#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
723 u8 reserved2;
724 u8 max_outstanding_r2ts;
725 u8 session_error_recovery_level;
726#endif
727 u32 context_id;
728 u32 max_send_pdu_length;
729 u32 max_recv_pdu_length;
730 u32 first_burst_length;
731 u32 max_burst_length;
732 u32 exp_stat_sn;
733};
734
735/*
736 * iSCSI destroy connection request
737 */
738struct iscsi_kwqe_conn_destroy {
739#if defined(__BIG_ENDIAN)
740 struct iscsi_kwqe_header hdr;
741 u16 reserved0;
742#elif defined(__LITTLE_ENDIAN)
743 u16 reserved0;
744 struct iscsi_kwqe_header hdr;
745#endif
746 u32 context_id;
747 u32 reserved1[6];
748};
749
750/*
751 * iSCSI KWQ WQE
752 */
753union iscsi_kwqe {
754 struct iscsi_kwqe_init1 init1;
755 struct iscsi_kwqe_init2 init2;
756 struct iscsi_kwqe_conn_offload1 conn_offload1;
757 struct iscsi_kwqe_conn_offload2 conn_offload2;
758 struct iscsi_kwqe_conn_update conn_update;
759 struct iscsi_kwqe_conn_destroy conn_destroy;
760};
761
762/*
763 * iSCSI Login SQ WQE
764 */
765struct bnx2i_login_request {
766#if defined(__BIG_ENDIAN)
767 u8 op_code;
768 u8 op_attr;
769#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
770#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
771#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
772#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
773#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
774#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
775#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
776#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
777#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
778#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
779 u8 version_max;
780 u8 version_min;
781#elif defined(__LITTLE_ENDIAN)
782 u8 version_min;
783 u8 version_max;
784 u8 op_attr;
785#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
786#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
787#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
788#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
789#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
790#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
791#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
792#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
793#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
794#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
795 u8 op_code;
796#endif
797 u32 data_length;
798 u32 isid_lo;
799#if defined(__BIG_ENDIAN)
800 u16 isid_hi;
801 u16 tsih;
802#elif defined(__LITTLE_ENDIAN)
803 u16 tsih;
804 u16 isid_hi;
805#endif
806#if defined(__BIG_ENDIAN)
807 u16 reserved2;
808 u16 itt;
809#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
810#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
811#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
812#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
813#elif defined(__LITTLE_ENDIAN)
814 u16 itt;
815#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
816#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
817#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
818#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
819 u16 reserved2;
820#endif
821#if defined(__BIG_ENDIAN)
822 u16 cid;
823 u16 reserved3;
824#elif defined(__LITTLE_ENDIAN)
825 u16 reserved3;
826 u16 cid;
827#endif
828 u32 cmd_sn;
829 u32 exp_stat_sn;
830 u32 reserved4;
831 u32 resp_bd_list_addr_lo;
832 u32 resp_bd_list_addr_hi;
833 u32 resp_buffer;
834#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
835#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
836#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
837#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
838#if defined(__BIG_ENDIAN)
839 u16 reserved8;
840 u8 reserved7;
841 u8 flags;
842#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
843#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
844#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
845#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
846#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
847#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
848#elif defined(__LITTLE_ENDIAN)
849 u8 flags;
850#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
851#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
852#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
853#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
854#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
855#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
856 u8 reserved7;
857 u16 reserved8;
858#endif
859 u32 bd_list_addr_lo;
860 u32 bd_list_addr_hi;
861#if defined(__BIG_ENDIAN)
862 u8 cq_index;
863 u8 reserved10;
864 u8 reserved9;
865 u8 num_bds;
866#elif defined(__LITTLE_ENDIAN)
867 u8 num_bds;
868 u8 reserved9;
869 u8 reserved10;
870 u8 cq_index;
871#endif
872};
873
874
875/*
876 * iSCSI Login CQE
877 */
878struct bnx2i_login_response {
879#if defined(__BIG_ENDIAN)
880 u8 op_code;
881 u8 response_flags;
882#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
883#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
884#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
885#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
886#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
887#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
888#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
889#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
890#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
891#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
892 u8 version_max;
893 u8 version_active;
894#elif defined(__LITTLE_ENDIAN)
895 u8 version_active;
896 u8 version_max;
897 u8 response_flags;
898#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
899#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
900#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
901#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
902#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
903#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
904#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
905#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
906#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
907#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
908 u8 op_code;
909#endif
910 u32 data_length;
911 u32 exp_cmd_sn;
912 u32 max_cmd_sn;
913 u32 reserved1[2];
914#if defined(__BIG_ENDIAN)
915 u16 reserved3;
916 u8 err_code;
917 u8 reserved2;
918#elif defined(__LITTLE_ENDIAN)
919 u8 reserved2;
920 u8 err_code;
921 u16 reserved3;
922#endif
923 u32 stat_sn;
924 u32 isid_lo;
925#if defined(__BIG_ENDIAN)
926 u16 isid_hi;
927 u16 tsih;
928#elif defined(__LITTLE_ENDIAN)
929 u16 tsih;
930 u16 isid_hi;
931#endif
932#if defined(__BIG_ENDIAN)
933 u8 status_class;
934 u8 status_detail;
935 u16 reserved4;
936#elif defined(__LITTLE_ENDIAN)
937 u16 reserved4;
938 u8 status_detail;
939 u8 status_class;
940#endif
941 u32 reserved5[3];
942#if defined(__BIG_ENDIAN)
943 u16 reserved6;
944 u16 itt;
945#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
946#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
947#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
948#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
949#elif defined(__LITTLE_ENDIAN)
950 u16 itt;
951#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
952#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
953#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
954#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
955 u16 reserved6;
956#endif
957 u32 cq_req_sn;
958};
959
960
961/*
962 * iSCSI Logout SQ WQE
963 */
964struct bnx2i_logout_request {
965#if defined(__BIG_ENDIAN)
966 u8 op_code;
967 u8 op_attr;
968#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
969#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
970#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
971#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
972 u16 reserved0;
973#elif defined(__LITTLE_ENDIAN)
974 u16 reserved0;
975 u8 op_attr;
976#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
977#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
978#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
979#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
980 u8 op_code;
981#endif
982 u32 data_length;
983 u32 reserved1[2];
984#if defined(__BIG_ENDIAN)
985 u16 reserved2;
986 u16 itt;
987#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
988#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
989#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
990#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
991#elif defined(__LITTLE_ENDIAN)
992 u16 itt;
993#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
994#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
995#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
996#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
997 u16 reserved2;
998#endif
999#if defined(__BIG_ENDIAN)
1000 u16 cid;
1001 u16 reserved3;
1002#elif defined(__LITTLE_ENDIAN)
1003 u16 reserved3;
1004 u16 cid;
1005#endif
1006 u32 cmd_sn;
1007 u32 reserved4[5];
1008 u32 zero_fill;
1009 u32 bd_list_addr_lo;
1010 u32 bd_list_addr_hi;
1011#if defined(__BIG_ENDIAN)
1012 u8 cq_index;
1013 u8 reserved6;
1014 u8 reserved5;
1015 u8 num_bds;
1016#elif defined(__LITTLE_ENDIAN)
1017 u8 num_bds;
1018 u8 reserved5;
1019 u8 reserved6;
1020 u8 cq_index;
1021#endif
1022};
1023
1024
1025/*
1026 * iSCSI Logout CQE
1027 */
1028struct bnx2i_logout_response {
1029#if defined(__BIG_ENDIAN)
1030 u8 op_code;
1031 u8 reserved1;
1032 u8 response;
1033 u8 reserved0;
1034#elif defined(__LITTLE_ENDIAN)
1035 u8 reserved0;
1036 u8 response;
1037 u8 reserved1;
1038 u8 op_code;
1039#endif
1040 u32 reserved2;
1041 u32 exp_cmd_sn;
1042 u32 max_cmd_sn;
1043 u32 reserved3[2];
1044#if defined(__BIG_ENDIAN)
1045 u16 reserved5;
1046 u8 err_code;
1047 u8 reserved4;
1048#elif defined(__LITTLE_ENDIAN)
1049 u8 reserved4;
1050 u8 err_code;
1051 u16 reserved5;
1052#endif
1053 u32 reserved6[3];
1054#if defined(__BIG_ENDIAN)
1055 u16 time_to_wait;
1056 u16 time_to_retain;
1057#elif defined(__LITTLE_ENDIAN)
1058 u16 time_to_retain;
1059 u16 time_to_wait;
1060#endif
1061 u32 reserved7[3];
1062#if defined(__BIG_ENDIAN)
1063 u16 reserved8;
1064 u16 itt;
1065#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1066#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1067#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1068#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1069#elif defined(__LITTLE_ENDIAN)
1070 u16 itt;
1071#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1072#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1073#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1074#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1075 u16 reserved8;
1076#endif
1077 u32 cq_req_sn;
1078};
1079
1080
1081/*
1082 * iSCSI Nop-In CQE
1083 */
1084struct bnx2i_nop_in_msg {
1085#if defined(__BIG_ENDIAN)
1086 u8 op_code;
1087 u8 reserved1;
1088 u16 reserved0;
1089#elif defined(__LITTLE_ENDIAN)
1090 u16 reserved0;
1091 u8 reserved1;
1092 u8 op_code;
1093#endif
1094 u32 data_length;
1095 u32 exp_cmd_sn;
1096 u32 max_cmd_sn;
1097 u32 ttt;
1098 u32 reserved2;
1099#if defined(__BIG_ENDIAN)
1100 u16 reserved4;
1101 u8 err_code;
1102 u8 reserved3;
1103#elif defined(__LITTLE_ENDIAN)
1104 u8 reserved3;
1105 u8 err_code;
1106 u16 reserved4;
1107#endif
1108 u32 reserved5;
1109 u32 lun[2];
1110 u32 reserved6[4];
1111#if defined(__BIG_ENDIAN)
1112 u16 reserved7;
1113 u16 itt;
1114#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1115#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1116#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1117#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1118#elif defined(__LITTLE_ENDIAN)
1119 u16 itt;
1120#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1121#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1122#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1123#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1124 u16 reserved7;
1125#endif
1126 u32 cq_req_sn;
1127};
1128
1129
1130/*
1131 * iSCSI NOP-OUT SQ WQE
1132 */
1133struct bnx2i_nop_out_request {
1134#if defined(__BIG_ENDIAN)
1135 u8 op_code;
1136 u8 op_attr;
1137#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1138#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1139#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1140#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1141 u16 reserved0;
1142#elif defined(__LITTLE_ENDIAN)
1143 u16 reserved0;
1144 u8 op_attr;
1145#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1146#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1147#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1148#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1149 u8 op_code;
1150#endif
1151 u32 data_length;
1152 u32 lun[2];
1153#if defined(__BIG_ENDIAN)
1154 u16 reserved2;
1155 u16 itt;
1156#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1157#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1158#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1159#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1160#elif defined(__LITTLE_ENDIAN)
1161 u16 itt;
1162#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1163#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1164#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1165#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1166 u16 reserved2;
1167#endif
1168 u32 ttt;
1169 u32 cmd_sn;
1170 u32 reserved3[2];
1171 u32 resp_bd_list_addr_lo;
1172 u32 resp_bd_list_addr_hi;
1173 u32 resp_buffer;
1174#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1175#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1176#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1177#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
1178#if defined(__BIG_ENDIAN)
1179 u16 reserved7;
1180 u8 reserved6;
1181 u8 flags;
1182#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1183#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1184#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1185#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1186#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1187#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1188#elif defined(__LITTLE_ENDIAN)
1189 u8 flags;
1190#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1191#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1192#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1193#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1194#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1195#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1196 u8 reserved6;
1197 u16 reserved7;
1198#endif
1199 u32 bd_list_addr_lo;
1200 u32 bd_list_addr_hi;
1201#if defined(__BIG_ENDIAN)
1202 u8 cq_index;
1203 u8 reserved9;
1204 u8 reserved8;
1205 u8 num_bds;
1206#elif defined(__LITTLE_ENDIAN)
1207 u8 num_bds;
1208 u8 reserved8;
1209 u8 reserved9;
1210 u8 cq_index;
1211#endif
1212};
1213
1214/*
1215 * iSCSI Reject CQE
1216 */
1217struct bnx2i_reject_msg {
1218#if defined(__BIG_ENDIAN)
1219 u8 op_code;
1220 u8 reserved1;
1221 u8 reason;
1222 u8 reserved0;
1223#elif defined(__LITTLE_ENDIAN)
1224 u8 reserved0;
1225 u8 reason;
1226 u8 reserved1;
1227 u8 op_code;
1228#endif
1229 u32 data_length;
1230 u32 exp_cmd_sn;
1231 u32 max_cmd_sn;
1232 u32 reserved2[2];
1233#if defined(__BIG_ENDIAN)
1234 u16 reserved4;
1235 u8 err_code;
1236 u8 reserved3;
1237#elif defined(__LITTLE_ENDIAN)
1238 u8 reserved3;
1239 u8 err_code;
1240 u16 reserved4;
1241#endif
1242 u32 reserved5[8];
1243 u32 cq_req_sn;
1244};
1245
1246/*
1247 * bnx2i iSCSI TMF SQ WQE
1248 */
1249struct bnx2i_tmf_request {
1250#if defined(__BIG_ENDIAN)
1251 u8 op_code;
1252 u8 op_attr;
1253#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1254#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1255#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1256#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1257 u16 reserved0;
1258#elif defined(__LITTLE_ENDIAN)
1259 u16 reserved0;
1260 u8 op_attr;
1261#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1262#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1263#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1264#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1265 u8 op_code;
1266#endif
1267 u32 data_length;
1268 u32 lun[2];
1269#if defined(__BIG_ENDIAN)
1270 u16 reserved1;
1271 u16 itt;
1272#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1273#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1274#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1275#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1276#elif defined(__LITTLE_ENDIAN)
1277 u16 itt;
1278#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1279#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1280#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1281#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1282 u16 reserved1;
1283#endif
1284 u32 ref_itt;
1285 u32 cmd_sn;
1286 u32 reserved2;
1287 u32 ref_cmd_sn;
1288 u32 reserved3[3];
1289 u32 zero_fill;
1290 u32 bd_list_addr_lo;
1291 u32 bd_list_addr_hi;
1292#if defined(__BIG_ENDIAN)
1293 u8 cq_index;
1294 u8 reserved5;
1295 u8 reserved4;
1296 u8 num_bds;
1297#elif defined(__LITTLE_ENDIAN)
1298 u8 num_bds;
1299 u8 reserved4;
1300 u8 reserved5;
1301 u8 cq_index;
1302#endif
1303};
1304
1305/*
1306 * iSCSI Text SQ WQE
1307 */
1308struct bnx2i_text_request {
1309#if defined(__BIG_ENDIAN)
1310 u8 op_code;
1311 u8 op_attr;
1312#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1313#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1314#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1315#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1316#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1317#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1318 u16 reserved0;
1319#elif defined(__LITTLE_ENDIAN)
1320 u16 reserved0;
1321 u8 op_attr;
1322#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1323#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1324#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1325#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1326#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1327#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1328 u8 op_code;
1329#endif
1330 u32 data_length;
1331 u32 lun[2];
1332#if defined(__BIG_ENDIAN)
1333 u16 reserved3;
1334 u16 itt;
1335#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1336#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1337#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1338#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1339#elif defined(__LITTLE_ENDIAN)
1340 u16 itt;
1341#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1342#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1343#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1344#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1345 u16 reserved3;
1346#endif
1347 u32 ttt;
1348 u32 cmd_sn;
1349 u32 reserved4[2];
1350 u32 resp_bd_list_addr_lo;
1351 u32 resp_bd_list_addr_hi;
1352 u32 resp_buffer;
1353#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1354#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1355#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1356#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
1357 u32 zero_fill;
1358 u32 bd_list_addr_lo;
1359 u32 bd_list_addr_hi;
1360#if defined(__BIG_ENDIAN)
1361 u8 cq_index;
1362 u8 reserved7;
1363 u8 reserved6;
1364 u8 num_bds;
1365#elif defined(__LITTLE_ENDIAN)
1366 u8 num_bds;
1367 u8 reserved6;
1368 u8 reserved7;
1369 u8 cq_index;
1370#endif
1371};
1372
1373/*
1374 * iSCSI SQ WQE
1375 */
1376union iscsi_request {
1377 struct bnx2i_cmd_request cmd;
1378 struct bnx2i_tmf_request tmf;
1379 struct bnx2i_nop_out_request nop_out;
1380 struct bnx2i_login_request login_req;
1381 struct bnx2i_text_request text;
1382 struct bnx2i_logout_request logout_req;
1383 struct bnx2i_cleanup_request cleanup;
1384};
1385
1386
1387/*
1388 * iSCSI TMF CQE
1389 */
1390struct bnx2i_tmf_response {
1391#if defined(__BIG_ENDIAN)
1392 u8 op_code;
1393 u8 reserved1;
1394 u8 response;
1395 u8 reserved0;
1396#elif defined(__LITTLE_ENDIAN)
1397 u8 reserved0;
1398 u8 response;
1399 u8 reserved1;
1400 u8 op_code;
1401#endif
1402 u32 reserved2;
1403 u32 exp_cmd_sn;
1404 u32 max_cmd_sn;
1405 u32 reserved3[2];
1406#if defined(__BIG_ENDIAN)
1407 u16 reserved5;
1408 u8 err_code;
1409 u8 reserved4;
1410#elif defined(__LITTLE_ENDIAN)
1411 u8 reserved4;
1412 u8 err_code;
1413 u16 reserved5;
1414#endif
1415 u32 reserved6[7];
1416#if defined(__BIG_ENDIAN)
1417 u16 reserved7;
1418 u16 itt;
1419#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1420#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1421#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1422#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1423#elif defined(__LITTLE_ENDIAN)
1424 u16 itt;
1425#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1426#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1427#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1428#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1429 u16 reserved7;
1430#endif
1431 u32 cq_req_sn;
1432};
1433
1434/*
1435 * iSCSI Text CQE
1436 */
1437struct bnx2i_text_response {
1438#if defined(__BIG_ENDIAN)
1439 u8 op_code;
1440 u8 response_flags;
1441#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1442#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1443#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1444#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1445#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1446#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1447 u16 reserved0;
1448#elif defined(__LITTLE_ENDIAN)
1449 u16 reserved0;
1450 u8 response_flags;
1451#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1452#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1453#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1454#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1455#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1456#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1457 u8 op_code;
1458#endif
1459 u32 data_length;
1460 u32 exp_cmd_sn;
1461 u32 max_cmd_sn;
1462 u32 ttt;
1463 u32 reserved2;
1464#if defined(__BIG_ENDIAN)
1465 u16 reserved4;
1466 u8 err_code;
1467 u8 reserved3;
1468#elif defined(__LITTLE_ENDIAN)
1469 u8 reserved3;
1470 u8 err_code;
1471 u16 reserved4;
1472#endif
1473 u32 reserved5;
1474 u32 lun[2];
1475 u32 reserved6[4];
1476#if defined(__BIG_ENDIAN)
1477 u16 reserved7;
1478 u16 itt;
1479#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1480#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1481#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1482#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1483#elif defined(__LITTLE_ENDIAN)
1484 u16 itt;
1485#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1486#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1487#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1488#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1489 u16 reserved7;
1490#endif
1491 u32 cq_req_sn;
1492};
1493
1494/*
1495 * iSCSI CQE
1496 */
1497union iscsi_response {
1498 struct bnx2i_cmd_response cmd;
1499 struct bnx2i_tmf_response tmf;
1500 struct bnx2i_login_response login_resp;
1501 struct bnx2i_text_response text;
1502 struct bnx2i_logout_response logout_resp;
1503 struct bnx2i_cleanup_response cleanup;
1504 struct bnx2i_reject_msg reject;
1505 struct bnx2i_async_msg async;
1506 struct bnx2i_nop_in_msg nop_in;
1507};
1508
1509#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
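Each SQ WQE and CQE above carries its task tag in a 16-bit itt field that packs a 14-bit driver task index (bits 0-13) and a 2-bit task type (bits 14-15) through the *_INDEX/*_TYPE mask and shift pairs. A minimal sketch of that packing, illustrative only and not part of the patch (the helper name is made up; the masks are the NOP-OUT ones defined above, and bnx2i_hwi.c later in this series composes the value the same way, e.g. task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT)):

/* Illustrative sketch: pack a driver task index and task type into a WQE itt */
static inline u16 iscsi_wqe_itt(u16 task_index, u16 task_type)
{
	return (task_index & ISCSI_NOP_OUT_REQUEST_INDEX) |
	       ((task_type << ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT) &
		ISCSI_NOP_OUT_REQUEST_TYPE);
}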
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 00000000000..820d428ae83
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_BNX2_ISCSI
2 tristate "Broadcom NetXtreme II iSCSI support"
3 select SCSI_ISCSI_ATTRS
4 select CNIC
5 ---help---
6 This driver supports iSCSI offload for the Broadcom NetXtreme II
7 devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 00000000000..b5802bd2e76
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
1bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
2
3obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 00000000000..d7576f28c6e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#ifndef _BNX2I_H_
15#define _BNX2I_H_
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/in.h>
26#include <linux/kfifo.h>
27#include <linux/netdevice.h>
28#include <linux/completion.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi.h>
35#include <scsi/iscsi_proto.h>
36#include <scsi/libiscsi.h>
37#include <scsi/scsi_transport_iscsi.h>
38
39#include "../../net/cnic_if.h"
40#include "57xx_iscsi_hsi.h"
41#include "57xx_iscsi_constants.h"
42
43#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
44
45#define BNX2I_MAX_ADAPTERS 8
46
47#define ISCSI_MAX_CONNS_PER_HBA 128
48#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
49#define ISCSI_MAX_CMDS_PER_SESS 128
50
51/* Total active commands across all connections supported by devices */
52#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
53#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
54#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
55
56#define ISCSI_MAX_BDS_PER_CMD 32
57
58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
60
61/* 5706/08 hardware has a limit on the maximum buffer size per BD it can handle */
62#define MAX_BD_LENGTH 65535
63#define BD_SPLIT_SIZE 32768
64
65/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
66#define BNX2I_SQ_WQES_MIN 16
67#define BNX2I_570X_SQ_WQES_MAX 128
68#define BNX2I_5770X_SQ_WQES_MAX 512
69#define BNX2I_570X_SQ_WQES_DEFAULT 128
70#define BNX2I_5770X_SQ_WQES_DEFAULT 256
71
72#define BNX2I_570X_CQ_WQES_MAX 128
73#define BNX2I_5770X_CQ_WQES_MAX 512
74
75#define BNX2I_RQ_WQES_MIN 16
76#define BNX2I_RQ_WQES_MAX 32
77#define BNX2I_RQ_WQES_DEFAULT 16
78
79/* CCELLs per conn */
80#define BNX2I_CCELLS_MIN 16
81#define BNX2I_CCELLS_MAX 96
82#define BNX2I_CCELLS_DEFAULT 64
83
84#define ITT_INVALID_SIGNATURE 0xFFFF
85
86#define ISCSI_CMD_CLEANUP_TIMEOUT 100
87
88#define BNX2I_CONN_CTX_BUF_SIZE 16384
89
90#define BNX2I_SQ_WQE_SIZE 64
91#define BNX2I_RQ_WQE_SIZE 256
92#define BNX2I_CQE_SIZE 64
93
94#define MB_KERNEL_CTX_SHIFT 8
95#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
96
97#define CTX_SHIFT 7
98#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
99
100#define CTX_OFFSET 0x10000
101#define MAX_CID_CNT 0x4000
102
103/* 5709 context registers */
104#define BNX2_MQ_CONFIG2 0x00003d00
105#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
106#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
107
108/* 57710's BAR2 is mapped to doorbell registers */
109#define BNX2X_DOORBELL_PCI_BAR 2
110#define BNX2X_MAX_CQS 8
111
112#define CNIC_ARM_CQE 1
113#define CNIC_DISARM_CQE 0
114
115#define REG_RD(__hba, offset) \
116 readl(__hba->regview + offset)
117#define REG_WR(__hba, offset, val) \
118 writel(val, __hba->regview + offset)
119
120
121/**
122 * struct generic_pdu_resc - login pdu resource structure
123 *
124 * @req_buf: driver buffer used to stage payload associated with
125 * the login request
126 * @req_dma_addr: dma address for iscsi login request payload buffer
127 * @req_buf_size: actual login request payload length
128 * @req_wr_ptr: pointer into login request buffer when next data is
129 * to be written
130 * @resp_hdr: iscsi header where iscsi login response header is to
131 * be recreated
132 * @resp_buf: buffer to stage login response payload
133 * @resp_dma_addr: login response payload buffer dma address
134 * @resp_buf_size: login response payload length
135 * @resp_wr_ptr: pointer into login response buffer when next data is
136 * to be written
137 * @req_bd_tbl: iscsi login request payload BD table
138 * @req_bd_dma: login request BD table dma address
139 * @resp_bd_tbl: iscsi login response payload BD table
140 * @resp_bd_dma: login response BD table dma address
141 *
142 * following structure defines buffer info for generic pdus such as iSCSI Login,
143 * Logout and NOP
144 */
145struct generic_pdu_resc {
146 char *req_buf;
147 dma_addr_t req_dma_addr;
148 u32 req_buf_size;
149 char *req_wr_ptr;
150 struct iscsi_hdr resp_hdr;
151 char *resp_buf;
152 dma_addr_t resp_dma_addr;
153 u32 resp_buf_size;
154 char *resp_wr_ptr;
155 char *req_bd_tbl;
156 dma_addr_t req_bd_dma;
157 char *resp_bd_tbl;
158 dma_addr_t resp_bd_dma;
159};
160
161
162/**
163 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
164 *
165 * @link: list head to link elements
166 * @max_ptrs: maximum pointers that can be stored in this page
167 * @num_valid: number of valid pointers in this page
168 * @page: base address for page pointer array
169 *
170 * structure to track DMA'able memory allocated for command BD tables
171 */
172struct bd_resc_page {
173 struct list_head link;
174 u32 max_ptrs;
175 u32 num_valid;
176 void *page[1];
177};
178
179
180/**
181 * struct io_bdt - I/O buffer descriptor table
182 *
183 * @bd_tbl: BD table's virtual address
184 * @bd_tbl_dma: BD table's dma address
185 * @bd_valid: num valid BD entries
186 *
187 * IO BD table
188 */
189struct io_bdt {
190 struct iscsi_bd *bd_tbl;
191 dma_addr_t bd_tbl_dma;
192 u16 bd_valid;
193};
194
195
196/**
197 * bnx2i_cmd - iscsi command structure
198 *
199 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
200 * @sg: SG list
201 * @io_tbl: buffer descriptor (BD) table
202 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
203 */
204struct bnx2i_cmd {
205 struct iscsi_hdr hdr;
206 struct bnx2i_conn *conn;
207 struct scsi_cmnd *scsi_cmd;
208 struct scatterlist *sg;
209 struct io_bdt io_tbl;
210 dma_addr_t bd_tbl_dma;
211 struct bnx2i_cmd_request req;
212};
213
214
215/**
216 * struct bnx2i_conn - iscsi connection structure
217 *
218 * @cls_conn: pointer to iscsi cls conn
219 * @hba: adapter structure pointer
220 * @iscsi_conn_cid: iscsi conn id
221 * @fw_cid: firmware iscsi context id
222 * @ep: endpoint structure pointer
223 * @gen_pdu: login/nopout/logout pdu resources
224 * @violation_notified: bit mask used to track iscsi error/warning messages
225 * already printed out
226 *
227 * iSCSI connection structure
228 */
229struct bnx2i_conn {
230 struct iscsi_cls_conn *cls_conn;
231 struct bnx2i_hba *hba;
232 struct completion cmd_cleanup_cmpl;
233 int is_bound;
234
235 u32 iscsi_conn_cid;
236#define BNX2I_CID_RESERVED 0x5AFF
237 u32 fw_cid;
238
239 struct timer_list poll_timer;
240 /*
241 * Queue Pair (QP) related structure elements.
242 */
243 struct bnx2i_endpoint *ep;
244
245 /*
246 * Buffer for login negotiation process
247 */
248 struct generic_pdu_resc gen_pdu;
249 u64 violation_notified;
250};
251
252
253
254/**
255 * struct iscsi_cid_queue - Per adapter iscsi cid queue
256 *
257 * @cid_que_base: queue base memory
258 * @cid_que: queue memory pointer
259 * @cid_q_prod_idx: producer index
260 * @cid_q_cons_idx: consumer index
261 * @cid_q_max_idx: max index, used to detect wrap-around condition
262 * @cid_free_cnt: queue size
263 * @conn_cid_tbl: iscsi cid to conn structure mapping table
264 *
265 * Per adapter iSCSI CID Queue
266 */
267struct iscsi_cid_queue {
268 void *cid_que_base;
269 u32 *cid_que;
270 u32 cid_q_prod_idx;
271 u32 cid_q_cons_idx;
272 u32 cid_q_max_idx;
273 u32 cid_free_cnt;
274 struct bnx2i_conn **conn_cid_tbl;
275};
276
277/**
278 * struct bnx2i_hba - bnx2i adapter structure
279 *
280 * @link: list head to link elements
281 * @cnic: pointer to cnic device
282 * @pcidev: pointer to pci dev
283 * @netdev: pointer to netdev structure
284 * @regview: mapped PCI register space
285 * @age: age, incremented by every recovery
286 * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
287 * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
288 * @reg_with_cnic: indicates whether the device is registered with CNIC
289 * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
290 * @mtu_supported: Ethernet MTU supported
291 * @shost: scsi host pointer
292 * @max_sqes: SQ size
293 * @max_rqes: RQ size
294 * @max_cqes: CQ size
295 * @num_ccell: number of command cells per connection
296 * @ofld_conns_active: number of active offloaded connections
297 * @max_active_conns: max offload connections supported by this device
298 * @cid_que: iscsi cid queue
299 * @ep_rdwr_lock: read / write lock to synchronize various ep lists
300 * @ep_ofld_list: connection list for pending offload completion
301 * @ep_destroy_list: connection list for pending destroy completion
302 * @mp_bd_tbl: BD table to be used with middle path requests
303 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
304 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
305 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
306 * @lock: lock to synchronize access to hba structure
307 * @pci_did: PCI device ID
308 * @pci_vid: PCI vendor ID
309 * @pci_sdid: PCI subsystem device ID
310 * @pci_svid: PCI subsystem vendor ID
311 * @pci_func: PCI function number in system pci tree
312 * @pci_devno: PCI device number in system pci tree
313 * @num_wqe_sent: statistic counter, total wqe's sent
314 * @num_cqe_rcvd: statistic counter, total cqe's received
315 * @num_intr_claimed: statistic counter, total interrupts claimed
316 * @link_changed_count: statistic counter, num of link change notifications
317 * received
318 * @ipaddr_changed_count: statistic counter, num times IP address changed while
319 * at least one connection is offloaded
320 * @num_sess_opened: statistic counter, total num sessions opened
321 * @num_conn_opened: statistic counter, total num conns opened on this hba
322 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
323 * currently offloaded connection, used to decode
324 * context memory
325 *
326 * Adapter Data Structure
327 */
328struct bnx2i_hba {
329 struct list_head link;
330 struct cnic_dev *cnic;
331 struct pci_dev *pcidev;
332 struct net_device *netdev;
333 void __iomem *regview;
334
335 u32 age;
336 unsigned long cnic_dev_type;
337 #define BNX2I_NX2_DEV_5706 0x0
338 #define BNX2I_NX2_DEV_5708 0x1
339 #define BNX2I_NX2_DEV_5709 0x2
340 #define BNX2I_NX2_DEV_57710 0x3
341 u32 mail_queue_access;
342 #define BNX2I_MQ_KERNEL_MODE 0x0
343 #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
344 #define BNX2I_MQ_BIN_MODE 0x2
345 unsigned long reg_with_cnic;
346 #define BNX2I_CNIC_REGISTERED 1
347
348 unsigned long adapter_state;
349 #define ADAPTER_STATE_UP 0
350 #define ADAPTER_STATE_GOING_DOWN 1
351 #define ADAPTER_STATE_LINK_DOWN 2
352 #define ADAPTER_STATE_INIT_FAILED 31
353 unsigned int mtu_supported;
354 #define BNX2I_MAX_MTU_SUPPORTED 1500
355
356 struct Scsi_Host *shost;
357
358 u32 max_sqes;
359 u32 max_rqes;
360 u32 max_cqes;
361 u32 num_ccell;
362
363 int ofld_conns_active;
364
365 int max_active_conns;
366 struct iscsi_cid_queue cid_que;
367
368 rwlock_t ep_rdwr_lock;
369 struct list_head ep_ofld_list;
370 struct list_head ep_destroy_list;
371
372 /*
373	 * BD table to be used with MP (Middle Path) requests.
374 */
375 char *mp_bd_tbl;
376 dma_addr_t mp_bd_dma;
377 char *dummy_buffer;
378 dma_addr_t dummy_buf_dma;
379
380 spinlock_t lock; /* protects hba structure access */
381 struct mutex net_dev_lock;/* sync net device access */
382
383 /*
384 * PCI related info.
385 */
386 u16 pci_did;
387 u16 pci_vid;
388 u16 pci_sdid;
389 u16 pci_svid;
390 u16 pci_func;
391 u16 pci_devno;
392
393 /*
394	 * The following statistics are useful during development and at a
395	 * later stage for scoreboarding.
396 */
397 u32 num_wqe_sent;
398 u32 num_cqe_rcvd;
399 u32 num_intr_claimed;
400 u32 link_changed_count;
401 u32 ipaddr_changed_count;
402 u32 num_sess_opened;
403 u32 num_conn_opened;
404 unsigned int ctx_ccell_tasks;
405};
406
407
408/*******************************************************************************
409 * QP [ SQ / RQ / CQ ] info.
410 ******************************************************************************/
411
412/*
413 * SQ/RQ/CQ generic structure definition
414 */
415struct sqe {
416 u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
417};
418
419struct rqe {
420 u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
421};
422
423struct cqe {
424 u8 cqe_byte[BNX2I_CQE_SIZE];
425};
426
427
428enum {
429#if defined(__LITTLE_ENDIAN)
430 CNIC_EVENT_COAL_INDEX = 0x0,
431 CNIC_SEND_DOORBELL = 0x4,
432 CNIC_EVENT_CQ_ARM = 0x7,
433 CNIC_RECV_DOORBELL = 0x8
434#elif defined(__BIG_ENDIAN)
435 CNIC_EVENT_COAL_INDEX = 0x2,
436 CNIC_SEND_DOORBELL = 0x6,
437 CNIC_EVENT_CQ_ARM = 0x4,
438 CNIC_RECV_DOORBELL = 0xa
439#endif
440};
441
442
443/*
444 * CQ DB
445 */
446struct bnx2x_iscsi_cq_pend_cmpl {
447 /* CQ producer, updated by Ustorm */
448 u16 ustrom_prod;
449 /* CQ pending completion counter */
450 u16 pend_cntr;
451};
452
453
454struct bnx2i_5771x_cq_db {
455 struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
456 /* CQ pending completion ITT array */
457 u16 itt[BNX2X_MAX_CQS];
458	/* Cstorm CQ sequence to notify array, updated by driver */
459	u16 sqn[BNX2X_MAX_CQS];
460	u32 reserved[4]; /* 16 byte alignment */
461};
462
463
464struct bnx2i_5771x_sq_rq_db {
465 u16 prod_idx;
466 u8 reserved0[14]; /* Pad structure size to 16 bytes */
467};
468
469
470struct bnx2i_5771x_dbell_hdr {
471 u8 header;
472 /* 1 for rx doorbell, 0 for tx doorbell */
473#define B577XX_DOORBELL_HDR_RX (0x1<<0)
474#define B577XX_DOORBELL_HDR_RX_SHIFT 0
475 /* 0 for normal doorbell, 1 for advertise wnd doorbell */
476#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
477#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
478 /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
479#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
480#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
481 /* connection type */
482#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
483#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
484};
485
486struct bnx2i_5771x_dbell {
487 struct bnx2i_5771x_dbell_hdr dbell;
488 u8 pad[3];
489
490};
491
492/**
493 * struct qp_info - QP (shared queue region) attributes structure
494 *
495 * @ctx_base: ioremapped pci register base to access doorbell register
496 * pertaining to this offloaded connection
497 * @sq_virt: virtual address of send queue (SQ) region
498 * @sq_phys: DMA address of SQ memory region
499 * @sq_mem_size: SQ size
500 * @sq_prod_qe: SQ producer entry pointer
501 * @sq_cons_qe: SQ consumer entry pointer
502 * @sq_first_qe: virtual address of first entry in SQ
503 * @sq_last_qe: virtual address of last entry in SQ
504 * @sq_prod_idx: SQ producer index
505 * @sq_cons_idx: SQ consumer index
506 * @sqe_left: number of SQ entries left
507 * @sq_pgtbl_virt: page table describing buffer constituting SQ region
508 * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
509 * @sq_pgtbl_size: SQ page table size
510 * @cq_virt: virtual address of completion queue (CQ) region
511 * @cq_phys: DMA address of CQ memory region
512 * @cq_mem_size: CQ size
513 * @cq_prod_qe: CQ producer entry pointer
514 * @cq_cons_qe: CQ consumer entry pointer
515 * @cq_first_qe: virtual address of first entry in CQ
516 * @cq_last_qe: virtual address of last entry in CQ
517 * @cq_prod_idx: CQ producer index
518 * @cq_cons_idx: CQ consumer index
519 * @cqe_left: number of CQ entries left
520 * @cqe_size: size of each CQ entry
521 * @cqe_exp_seq_sn: next expected CQE sequence number
522 * @cq_pgtbl_virt: page table describing buffer constituting CQ region
523 * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
524 * @cq_pgtbl_size: CQ page table size
525 * @rq_virt: virtual address of receive queue (RQ) region
526 * @rq_phys: DMA address of RQ memory region
527 * @rq_mem_size: RQ size
528 * @rq_prod_qe: RQ producer entry pointer
529 * @rq_cons_qe: RQ consumer entry pointer
530 * @rq_first_qe: virtual address of first entry in RQ
531 * @rq_last_qe: virtual address of last entry in RQ
532 * @rq_prod_idx: RQ producer index
533 * @rq_cons_idx: RQ consumer index
534 * @rqe_left: number of RQ entries left
535 * @rq_pgtbl_virt: page table describing buffer constituting RQ region
536 * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
537 * @rq_pgtbl_size: RQ page table size
538 *
539 * queue pair (QP) is a per connection shared data structure which is used
540 * to send work requests (SQ), receive completion notifications (CQ)
541 * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
542 * below holds queue memory, consumer/producer indexes and page table
543 * information
544 */
545struct qp_info {
546 void __iomem *ctx_base;
547#define DPM_TRIGER_TYPE 0x40
548
549#define BNX2I_570x_QUE_DB_SIZE 0
550#define BNX2I_5771x_QUE_DB_SIZE 16
551 struct sqe *sq_virt;
552 dma_addr_t sq_phys;
553 u32 sq_mem_size;
554
555 struct sqe *sq_prod_qe;
556 struct sqe *sq_cons_qe;
557 struct sqe *sq_first_qe;
558 struct sqe *sq_last_qe;
559 u16 sq_prod_idx;
560 u16 sq_cons_idx;
561 u32 sqe_left;
562
563 void *sq_pgtbl_virt;
564 dma_addr_t sq_pgtbl_phys;
565 u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
566
567 struct cqe *cq_virt;
568 dma_addr_t cq_phys;
569 u32 cq_mem_size;
570
571 struct cqe *cq_prod_qe;
572 struct cqe *cq_cons_qe;
573 struct cqe *cq_first_qe;
574 struct cqe *cq_last_qe;
575 u16 cq_prod_idx;
576 u16 cq_cons_idx;
577 u32 cqe_left;
578 u32 cqe_size;
579 u32 cqe_exp_seq_sn;
580
581 void *cq_pgtbl_virt;
582 dma_addr_t cq_pgtbl_phys;
583 u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
584
585 struct rqe *rq_virt;
586 dma_addr_t rq_phys;
587 u32 rq_mem_size;
588
589 struct rqe *rq_prod_qe;
590 struct rqe *rq_cons_qe;
591 struct rqe *rq_first_qe;
592 struct rqe *rq_last_qe;
593 u16 rq_prod_idx;
594 u16 rq_cons_idx;
595 u32 rqe_left;
596
597 void *rq_pgtbl_virt;
598 dma_addr_t rq_pgtbl_phys;
599 u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
600};
601
602
603
604/*
605 * CID handles
606 */
607struct ep_handles {
608 u32 fw_cid;
609 u32 drv_iscsi_cid;
610 u16 pg_cid;
611 u16 rsvd;
612};
613
614
615enum {
616 EP_STATE_IDLE = 0x0,
617 EP_STATE_PG_OFLD_START = 0x1,
618 EP_STATE_PG_OFLD_COMPL = 0x2,
619 EP_STATE_OFLD_START = 0x4,
620 EP_STATE_OFLD_COMPL = 0x8,
621 EP_STATE_CONNECT_START = 0x10,
622 EP_STATE_CONNECT_COMPL = 0x20,
623 EP_STATE_ULP_UPDATE_START = 0x40,
624 EP_STATE_ULP_UPDATE_COMPL = 0x80,
625 EP_STATE_DISCONN_START = 0x100,
626 EP_STATE_DISCONN_COMPL = 0x200,
627 EP_STATE_CLEANUP_START = 0x400,
628 EP_STATE_CLEANUP_CMPL = 0x800,
629 EP_STATE_TCP_FIN_RCVD = 0x1000,
630 EP_STATE_TCP_RST_RCVD = 0x2000,
631 EP_STATE_PG_OFLD_FAILED = 0x1000000,
632 EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
633 EP_STATE_CLEANUP_FAILED = 0x4000000,
634 EP_STATE_OFLD_FAILED = 0x8000000,
635 EP_STATE_CONNECT_FAILED = 0x10000000,
636 EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
637};
638
639/**
640 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
641 *
642 * @link: list head to link elements
643 * @hba: adapter to which this connection belongs
644 * @conn: iscsi connection this EP is linked to
645 * @sess: iscsi session this EP is linked to
646 * @cm_sk: cnic sock struct
647 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
648 * after HBA reset is completed by bnx2i/cnic/bnx2
649 * modules
650 * @state: tracks offload connection state machine
651 * @teardown_mode: indicates if conn teardown is abortive or orderly
652 * @qp: QP information
653 * @ids: contains chip allocated *context id* & driver assigned
654 * *iscsi cid*
655 * @ofld_timer: offload timer to detect timeout
656 * @ofld_wait: wait queue
657 *
658 * Endpoint Structure - equivalent of tcp socket structure
659 */
660struct bnx2i_endpoint {
661 struct list_head link;
662 struct bnx2i_hba *hba;
663 struct bnx2i_conn *conn;
664 struct cnic_sock *cm_sk;
665 u32 hba_age;
666 u32 state;
667 unsigned long timestamp;
668 int num_active_cmds;
669
670 struct qp_info qp;
671 struct ep_handles ids;
672 #define ep_iscsi_cid ids.drv_iscsi_cid
673 #define ep_cid ids.fw_cid
674 #define ep_pg_cid ids.pg_cid
675 struct timer_list ofld_timer;
676 wait_queue_head_t ofld_wait;
677};
678
679
680
681/* Global variables */
682extern unsigned int error_mask1, error_mask2;
683extern u64 iscsi_error_mask;
684extern unsigned int en_tcp_dack;
685extern unsigned int event_coal_div;
686
687extern struct scsi_transport_template *bnx2i_scsi_xport_template;
688extern struct iscsi_transport bnx2i_iscsi_transport;
689extern struct cnic_ulp_ops bnx2i_cnic_cb;
690
691extern unsigned int sq_size;
692extern unsigned int rq_size;
693
694extern struct device_attribute *bnx2i_dev_attributes[];
695
696
697
698/*
699 * Function Prototypes
700 */
701extern void bnx2i_identify_device(struct bnx2i_hba *hba);
702extern void bnx2i_register_device(struct bnx2i_hba *hba);
703
704extern void bnx2i_ulp_init(struct cnic_dev *dev);
705extern void bnx2i_ulp_exit(struct cnic_dev *dev);
706extern void bnx2i_start(void *handle);
707extern void bnx2i_stop(void *handle);
708extern void bnx2i_reg_dev_all(void);
709extern void bnx2i_unreg_dev_all(void);
710extern struct bnx2i_hba *get_adapter_list_head(void);
711
712struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
713 u16 iscsi_cid);
714
715int bnx2i_alloc_ep_pool(void);
716void bnx2i_release_ep_pool(void);
717struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
718struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
719
720struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
721
722struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
723void bnx2i_free_hba(struct bnx2i_hba *hba);
724
725void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
726void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
727
728void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
729
730void bnx2i_drop_session(struct iscsi_cls_session *session);
731
732extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
733extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
734 struct iscsi_task *mtask);
735extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
736 struct iscsi_task *mtask);
737extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
738 struct bnx2i_cmd *cmnd);
739extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
740 struct iscsi_task *mtask, u32 ttt,
741 char *datap, int data_len, int unsol);
742extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
743 struct iscsi_task *mtask);
744extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
745 struct bnx2i_cmd *cmd);
746extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
747 struct bnx2i_endpoint *ep);
748extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
749extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
750 struct bnx2i_endpoint *ep);
751
752extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
753 struct bnx2i_endpoint *ep);
754extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
755 struct bnx2i_endpoint *ep);
756extern void bnx2i_ep_ofld_timer(unsigned long data);
757extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
758 struct bnx2i_hba *hba, u32 iscsi_cid);
759extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
760 struct bnx2i_hba *hba, u32 iscsi_cid);
761
762extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
763extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
764
765/* Debug related function prototypes */
766extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
767extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
768extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
769extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
770
771#endif
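The SQ/RQ/CQ limits defined earlier in this header bound the sq_size and rq_size module parameters declared above. A hedged sketch of how a caller might clamp a user-supplied send queue size for a 570x-family device, illustrative only (the function name and exact policy are assumptions; the constants are the ones from this header):

/* Illustrative sketch: clamp a requested SQ size to the 570x limits */
static unsigned int bnx2i_clamp_570x_sq_size(unsigned int requested)
{
	if (!requested)
		return BNX2I_570X_SQ_WQES_DEFAULT;
	if (requested < BNX2I_SQ_WQES_MIN)
		return BNX2I_SQ_WQES_MIN;
	if (requested > BNX2I_570X_SQ_WQES_MAX)
		return BNX2I_570X_SQ_WQES_MAX;
	return requested;
}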
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 00000000000..906cef5cda8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h>
16#include "bnx2i.h"
17
18/**
19 * bnx2i_get_cid_num - get cid from ep
20 * @ep: endpoint pointer
21 *
22 * 57710 devices use the context id as-is; 570x devices derive it via GET_CID_NUM()
23 */
24static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
25{
26 u32 cid;
27
28 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
29 cid = ep->ep_cid;
30 else
31 cid = GET_CID_NUM(ep->ep_cid);
32 return cid;
33}
34
35
36/**
37 * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ sizes to span whole pages
38 * @hba: Adapter for which adjustments are to be made
39 *
40 * 5706/5708/5709 queue sizes are additionally rounded down to a power of two
41 */
42static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
43{
44 u32 num_elements_per_pg;
45
46 if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
47 test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
48 test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
49 if (!is_power_of_2(hba->max_sqes))
50 hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
51
52 if (!is_power_of_2(hba->max_rqes))
53 hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
54 }
55
56 /* Adjust each queue size if the user selection does not
57 * yield integral num of page buffers
58 */
59 /* adjust SQ */
60 num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
61 if (hba->max_sqes < num_elements_per_pg)
62 hba->max_sqes = num_elements_per_pg;
63 else if (hba->max_sqes % num_elements_per_pg)
64 hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
65 ~(num_elements_per_pg - 1);
66
67 /* adjust CQ */
68 num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
69 if (hba->max_cqes < num_elements_per_pg)
70 hba->max_cqes = num_elements_per_pg;
71 else if (hba->max_cqes % num_elements_per_pg)
72 hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
73 ~(num_elements_per_pg - 1);
74
75 /* adjust RQ */
76 num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
77 if (hba->max_rqes < num_elements_per_pg)
78 hba->max_rqes = num_elements_per_pg;
79 else if (hba->max_rqes % num_elements_per_pg)
80 hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
81 ~(num_elements_per_pg - 1);
82}
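The round-up expression used for each queue above, (x + n - 1) & ~(n - 1), rounds x up to the next multiple of n whenever n is a power of two. As a worked example, assuming PAGE_SIZE is 4096: with BNX2I_SQ_WQE_SIZE of 64 there are 64 WQEs per page, so a requested max_sqes of 200 becomes (200 + 63) & ~63 = 256, i.e. exactly four pages of SQ entries.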
83
84
85/**
86 * bnx2i_get_link_state - get network interface link state
87 * @hba: adapter instance pointer
88 *
89 * updates adapter structure flag based on netdev state
90 */
91static void bnx2i_get_link_state(struct bnx2i_hba *hba)
92{
93 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
94 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
95 else
96 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
97}
98
99
100/**
101 * bnx2i_iscsi_license_error - displays iscsi license related error message
102 * @hba: adapter instance pointer
103 * @error_code: error classification
104 *
105 * Puts out an error log when driver is unable to offload iscsi connection
106 * due to license restrictions
107 */
108static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
109{
110 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
111 /* iSCSI offload not supported on this device */
112 printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
113 hba->netdev->name);
114 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
115 /* iSCSI offload not supported on this LOM device */
116		printk(KERN_ERR "bnx2i: LOM is not enabled to "
117 "offload iSCSI connections, dev=%s\n",
118 hba->netdev->name);
119 set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
120}
121
122
123/**
124 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
125 * @ep: endpoint (transport identifier) structure
126 * @action: action, ARM or DISARM. For now only ARM_CQE is used
127 *
128 * Arming the CQ enables the chip to generate global EQ events in order to
129 * interrupt the driver. An EQ event is generated when the CQ index is hit or
130 * when at least one CQE is outstanding and the on-chip timer expires
131 */
132void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{
134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index;
136
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return;
139
140 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn +
142 ep->num_active_cmds / event_coal_div;
143 cq_index %= (ep->qp.cqe_size * 2 + 1);
144 if (!cq_index) {
145 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *)
147 ep->qp.cq_pgtbl_virt;
148 cq_db->sqn[0] = cq_index;
149 }
150 }
151}
152
153
154/**
155 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
156 * @conn: iscsi connection on which RQ event occurred
157 * @ptr: driver buffer to which RQ buffer contents is to
158 * be copied
159 * @len: length of valid data inside RQ buf
160 *
161 * Copies RQ buffer contents from shared (DMA'able) memory region to
162 * driver buffer. RQ is used to DMA unsolicited iSCSI PDUs and
163 * scsi sense info
164 */
165void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
166{
167 if (!bnx2i_conn->ep->qp.rqe_left)
168 return;
169
170 bnx2i_conn->ep->qp.rqe_left--;
171 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
172 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
173 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
174 bnx2i_conn->ep->qp.rq_cons_idx = 0;
175 } else {
176 bnx2i_conn->ep->qp.rq_cons_qe++;
177 bnx2i_conn->ep->qp.rq_cons_idx++;
178 }
179}
180
181
182static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
183{
184 struct bnx2i_5771x_dbell dbell;
185 u32 msg;
186
187 memset(&dbell, 0, sizeof(dbell));
188 dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
189 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
190 msg = *((u32 *)&dbell);
191 /* TODO : get doorbell register mapping */
192 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
193}
194
195
196/**
197 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
198 * @conn: iscsi connection on which event to post
199 * @count: number of RQ buffer being posted to chip
200 *
201 * No need to ring hardware doorbell for 57710 family of devices
202 */
203void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
204{
205 struct bnx2i_5771x_sq_rq_db *rq_db;
206 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
207 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
208
209 ep->qp.rqe_left += count;
210 ep->qp.rq_prod_idx &= 0x7FFF;
211 ep->qp.rq_prod_idx += count;
212
213 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
214 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
215 if (!hi_bit)
216 ep->qp.rq_prod_idx |= 0x8000;
217 } else
218 ep->qp.rq_prod_idx |= hi_bit;
219
220 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
221 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
222 rq_db->prod_idx = ep->qp.rq_prod_idx;
223 /* no need to ring hardware doorbell for 57710 */
224 } else {
225 writew(ep->qp.rq_prod_idx,
226 ep->qp.ctx_base + CNIC_RECV_DOORBELL);
227 }
228 mmiowb();
229}
230
231
232/**
233 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
234 * @conn: iscsi connection to which new SQ entries belong
235 * @count: number of SQ WQEs to post
236 *
237 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
238 * of devices. For 5706/5708/5709 new SQ WQE count is written into the
239 * doorbell register
240 */
241static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
242{
243 struct bnx2i_5771x_sq_rq_db *sq_db;
244 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
245
246 ep->num_active_cmds++;
247 wmb(); /* flush SQ WQE memory before the doorbell is rung */
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
249 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
250 sq_db->prod_idx = ep->qp.sq_prod_idx;
251 bnx2i_ring_577xx_doorbell(bnx2i_conn);
252 } else
253 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
254
255 mmiowb(); /* flush posted PCI writes */
256}
257
258
259/**
260 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
261 * @conn: iscsi connection to which new SQ entries belong
262 * @count: number of SQ WQEs to post
263 *
264 * this routine will update SQ driver parameters and ring the doorbell
265 */
266static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
267 int count)
268{
269 int tmp_cnt;
270
271 if (count == 1) {
272 if (bnx2i_conn->ep->qp.sq_prod_qe ==
273 bnx2i_conn->ep->qp.sq_last_qe)
274 bnx2i_conn->ep->qp.sq_prod_qe =
275 bnx2i_conn->ep->qp.sq_first_qe;
276 else
277 bnx2i_conn->ep->qp.sq_prod_qe++;
278 } else {
279 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
280 bnx2i_conn->ep->qp.sq_last_qe)
281 bnx2i_conn->ep->qp.sq_prod_qe += count;
282 else {
283 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
284 bnx2i_conn->ep->qp.sq_prod_qe;
285 bnx2i_conn->ep->qp.sq_prod_qe =
286 &bnx2i_conn->ep->qp.sq_first_qe[count -
287 (tmp_cnt + 1)];
288 }
289 }
290 bnx2i_conn->ep->qp.sq_prod_idx += count;
291 /* Ring the doorbell */
292 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
293}
294
295
296/**
297 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
298 * @conn: iscsi connection
299 * @cmd: driver command structure which is requesting
300 * a WQE to be sent to the chip for further processing
301 *
302 * prepare and post an iSCSI Login request WQE to CNIC firmware
303 */
304int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
305 struct iscsi_task *task)
306{
307 struct bnx2i_cmd *bnx2i_cmd;
308 struct bnx2i_login_request *login_wqe;
309 struct iscsi_login *login_hdr;
310 u32 dword;
311
312 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
313 login_hdr = (struct iscsi_login *)task->hdr;
314 login_wqe = (struct bnx2i_login_request *)
315 bnx2i_conn->ep->qp.sq_prod_qe;
316
317 login_wqe->op_code = login_hdr->opcode;
318 login_wqe->op_attr = login_hdr->flags;
319 login_wqe->version_max = login_hdr->max_version;
320 login_wqe->version_min = login_hdr->min_version;
321 login_wqe->data_length = ntoh24(login_hdr->dlength);
322 login_wqe->isid_lo = *((u32 *) login_hdr->isid);
323 login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
324 login_wqe->tsih = login_hdr->tsih;
325 login_wqe->itt = task->itt |
326 (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
327 login_wqe->cid = login_hdr->cid;
328
329 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
330 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
331
332 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
333 login_wqe->resp_bd_list_addr_hi =
334 (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
335
336 dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
337 (bnx2i_conn->gen_pdu.resp_buf_size <<
338 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
339 login_wqe->resp_buffer = dword;
340 login_wqe->flags = 0;
341 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
342 login_wqe->bd_list_addr_hi =
343 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
344 login_wqe->num_bds = 1;
345 login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
346
347 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
348 return 0;
349}
350
351/**
352 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
353 * @conn: iscsi connection
354 * @mtask: driver command structure which is requesting
355 * a WQE to be sent to the chip for further processing
356 *
357 * prepare and post an iSCSI task management request WQE to CNIC firmware
358 */
359int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
360 struct iscsi_task *mtask)
361{
362 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
363 struct iscsi_tm *tmfabort_hdr;
364 struct scsi_cmnd *ref_sc;
365 struct iscsi_task *ctask;
366 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword;
369
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
372 tmfabort_wqe = (struct bnx2i_tmf_request *)
373 bnx2i_conn->ep->qp.sq_prod_qe;
374
375 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
376 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387	if (!ctask || !ctask->sc)
388 /*
389 * the iscsi layer must have completed the cmd while this
390 * was starting up.
391 */
392 return 0;
393 ref_sc = ctask->sc;
394
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
403 tmfabort_wqe->bd_list_addr_hi = (u32)
404 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
405 tmfabort_wqe->num_bds = 1;
406 tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
407
408 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
409 return 0;
410}
411
412/**
413 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
414 * @conn: iscsi connection
415 * @cmd: driver command structure which is requesting
416 * a WQE to be sent to the chip for further processing
417 *
418 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
419 */
420int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
421 struct bnx2i_cmd *cmd)
422{
423 struct bnx2i_cmd_request *scsi_cmd_wqe;
424
425 scsi_cmd_wqe = (struct bnx2i_cmd_request *)
426 bnx2i_conn->ep->qp.sq_prod_qe;
427 memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
428 scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
429
430 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
431 return 0;
432}
433
434/**
435 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
436 * @conn: iscsi connection
437 * @cmd: driver command structure which is requesting
438 * a WQE to be sent to the chip for further processing
439 * @ttt: TTT to be used when building pdu header
440 * @datap: payload buffer pointer
441 * @data_len: payload data length
442 * @unsol: indicates whether the nopout pdu is an unsolicited pdu or
443 * in response to target's NOPIN w/ TTT != FFFFFFFF
444 *
445 * prepare and post a nopout request WQE to CNIC firmware
446 */
447int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
448 struct iscsi_task *task, u32 ttt,
449 char *datap, int data_len, int unsol)
450{
451 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
452 struct bnx2i_cmd *bnx2i_cmd;
453 struct bnx2i_nop_out_request *nopout_wqe;
454 struct iscsi_nopout *nopout_hdr;
455
456 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
457 nopout_hdr = (struct iscsi_nopout *)task->hdr;
458 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
459 nopout_wqe->op_code = nopout_hdr->opcode;
460 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
461 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
462
463 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
464		u32 tmp = nopout_wqe->lun[0];
465		/* 57710 requires LUN field to be swapped */
466		nopout_wqe->lun[0] = nopout_wqe->lun[1];
467		nopout_wqe->lun[1] = tmp;
468 }
469
470 nopout_wqe->itt = ((u16)task->itt |
471 (ISCSI_TASK_TYPE_MPATH <<
472 ISCSI_TMF_REQUEST_TYPE_SHIFT));
473 nopout_wqe->ttt = ttt;
474 nopout_wqe->flags = 0;
475 if (!unsol)
476 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
477 else if (nopout_hdr->itt == RESERVED_ITT)
478 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
479
480 nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
481 nopout_wqe->data_length = data_len;
482 if (data_len) {
483 /* handle payload data, not required in first release */
484 printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
485 } else {
486 nopout_wqe->bd_list_addr_lo = (u32)
487 bnx2i_conn->hba->mp_bd_dma;
488 nopout_wqe->bd_list_addr_hi =
489 (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
490 nopout_wqe->num_bds = 1;
491 }
492 nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
493
494 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
495 return 0;
496}
497
498
499/**
500 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
501 * @conn: iscsi connection
502 * @cmd: driver command structure which is requesting
503 * a WQE to be sent to the chip for further processing
504 *
505 * prepare and post logout request WQE to CNIC firmware
506 */
507int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
508 struct iscsi_task *task)
509{
510 struct bnx2i_cmd *bnx2i_cmd;
511 struct bnx2i_logout_request *logout_wqe;
512 struct iscsi_logout *logout_hdr;
513
514 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
515 logout_hdr = (struct iscsi_logout *)task->hdr;
516
517 logout_wqe = (struct bnx2i_logout_request *)
518 bnx2i_conn->ep->qp.sq_prod_qe;
519 memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
520
521 logout_wqe->op_code = logout_hdr->opcode;
522 logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
523 logout_wqe->op_attr =
524 logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
525 logout_wqe->itt = ((u16)task->itt |
526 (ISCSI_TASK_TYPE_MPATH <<
527 ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
528 logout_wqe->data_length = 0;
529 logout_wqe->cid = 0;
530
531 logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
532 logout_wqe->bd_list_addr_hi = (u32)
533 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
534 logout_wqe->num_bds = 1;
535 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
536
537 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
538 return 0;
539}
540
541
542/**
543 * bnx2i_update_iscsi_conn - send iSCSI connection update request to the chip
544 * @conn: iscsi connection which requires iscsi parameter update
545 *
546 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
547 */
548void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
549{
550 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
551 struct bnx2i_hba *hba = bnx2i_conn->hba;
552 struct kwqe *kwqe_arr[2];
553 struct iscsi_kwqe_conn_update *update_wqe;
554 struct iscsi_kwqe_conn_update conn_update_kwqe;
555
556 update_wqe = &conn_update_kwqe;
557
558 update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
559 update_wqe->hdr.flags =
560 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
561
562 /* 5771x requires conn context id to be passed as is */
563 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
564 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
565 else
566 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
567 update_wqe->conn_flags = 0;
568 if (conn->hdrdgst_en)
569 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
570 if (conn->datadgst_en)
571 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
572 if (conn->session->initial_r2t_en)
573 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
574 if (conn->session->imm_data_en)
575 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
576
577 update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
578 update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
579 update_wqe->first_burst_length = conn->session->first_burst;
580 update_wqe->max_burst_length = conn->session->max_burst;
581 update_wqe->exp_stat_sn = conn->exp_statsn;
582 update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
583 update_wqe->session_error_recovery_level = conn->session->erl;
584 iscsi_conn_printk(KERN_ALERT, conn,
585			  "bnx2i: conn update - MBL 0x%x FBL 0x%x "
586 "MRDSL_I 0x%x MRDSL_T 0x%x \n",
587 update_wqe->max_burst_length,
588 update_wqe->first_burst_length,
589 update_wqe->max_recv_pdu_length,
590 update_wqe->max_send_pdu_length);
591
592 kwqe_arr[0] = (struct kwqe *) update_wqe;
593 if (hba->cnic && hba->cnic->submit_kwqes)
594 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
595}
596
597
598/**
599 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
600 * @data: endpoint (transport handle) structure pointer
601 *
602 * routine to handle connection offload/destroy request timeout
603 */
604void bnx2i_ep_ofld_timer(unsigned long data)
605{
606 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
607
608 if (ep->state == EP_STATE_OFLD_START) {
609 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
610 ep->state = EP_STATE_OFLD_FAILED;
611 } else if (ep->state == EP_STATE_DISCONN_START) {
612 printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
613 ep->state = EP_STATE_DISCONN_TIMEDOUT;
614 } else if (ep->state == EP_STATE_CLEANUP_START) {
615 printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
616 ep->state = EP_STATE_CLEANUP_FAILED;
617 }
618
619 wake_up_interruptible(&ep->ofld_wait);
620}
621
622
623static int bnx2i_power_of2(u32 val)
624{
625 u32 power = 0;
626 if (val & (val - 1))
627 return power;
628 val--;
629 while (val) {
630 val = val >> 1;
631 power++;
632 }
633 return power;
634}
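For reference, bnx2i_power_of2() returns the base-2 logarithm of its argument when the argument is a power of two (for example, bnx2i_power_of2(32) is 5) and 0 for any other non-zero value, since val & (val - 1) is non-zero exactly when more than one bit is set.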
635
636
637/**
638 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
639 * @hba: adapter structure pointer
640 * @cmd: driver command structure which is requesting
641 * a WQE to be sent to the chip for further processing
642 *
643 * prepares and posts an iSCSI command cleanup request SQ WQE
644 */
645void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
646{
647 struct bnx2i_cleanup_request *cmd_cleanup;
648
649 cmd_cleanup =
650 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
651 memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
652
653 cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
654 cmd_cleanup->itt = cmd->req.itt;
655 cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
656
657 bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
658}
659
660
661/**
662 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
663 * @hba: adapter structure pointer
664 * @ep: endpoint (transport identifier) structure
665 *
666 * this routine prepares and posts a destroy-connection KWQE to initiate the
667 * iscsi connection context clean-up process
668 */
669void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
670{
671 struct kwqe *kwqe_arr[2];
672 struct iscsi_kwqe_conn_destroy conn_cleanup;
673
674 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
675
676 conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
677 conn_cleanup.hdr.flags =
678 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
679 /* 5771x requires conn context id to be passed as is */
680 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
681 conn_cleanup.context_id = ep->ep_cid;
682 else
683 conn_cleanup.context_id = (ep->ep_cid >> 7);
684
685 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
686
687 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
688 if (hba->cnic && hba->cnic->submit_kwqes)
689 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
690}
691
692
693/**
694 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
695 * @hba: adapter structure pointer
696 * @ep: endpoint (transport identifier) structure
697 *
698 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
699 */
700static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
701 struct bnx2i_endpoint *ep)
702{
703 struct kwqe *kwqe_arr[2];
704 struct iscsi_kwqe_conn_offload1 ofld_req1;
705 struct iscsi_kwqe_conn_offload2 ofld_req2;
706 dma_addr_t dma_addr;
707 int num_kwqes = 2;
708 u32 *ptbl;
709
710 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
711 ofld_req1.hdr.flags =
712 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
713
714 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
715
716 dma_addr = ep->qp.sq_pgtbl_phys;
717 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
718 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
719
720 dma_addr = ep->qp.cq_pgtbl_phys;
721 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
722 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
723
724 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
725 ofld_req2.hdr.flags =
726 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
727
728 dma_addr = ep->qp.rq_pgtbl_phys;
729 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
730 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
731
732 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
733
734 ofld_req2.sq_first_pte.hi = *ptbl++;
735 ofld_req2.sq_first_pte.lo = *ptbl;
736
737 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
738 ofld_req2.cq_first_pte.hi = *ptbl++;
739 ofld_req2.cq_first_pte.lo = *ptbl;
740
741 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
742 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
743 ofld_req2.num_additional_wqes = 0;
744
745 if (hba->cnic && hba->cnic->submit_kwqes)
746 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
747}
748
749
750/**
751 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
752 * @hba: adapter structure pointer
753 * @ep: endpoint (transport identifier) structure
754 *
755 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
756 */
757static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
758 struct bnx2i_endpoint *ep)
759{
760 struct kwqe *kwqe_arr[5];
761 struct iscsi_kwqe_conn_offload1 ofld_req1;
762 struct iscsi_kwqe_conn_offload2 ofld_req2;
763 struct iscsi_kwqe_conn_offload3 ofld_req3[1];
764 dma_addr_t dma_addr;
765 int num_kwqes = 2;
766 u32 *ptbl;
767
768 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
769 ofld_req1.hdr.flags =
770 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
771
772 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
773
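	/* On 5771x the start of each page-table region is reserved as a
	 * doorbell area (the CQ's area holds the CQ DB, see
	 * bnx2i_alloc_qp_resc()), so the PTE list handed to the firmware
	 * begins ISCSI_*_DB_SIZE bytes in, matching setup_qp_page_tables() */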
774 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
775 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
776 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
777
778 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
779 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
780 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
781
782 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
783 ofld_req2.hdr.flags =
784 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
785
786 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
787 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
788 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
789
790 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
791 ofld_req2.sq_first_pte.hi = *ptbl++;
792 ofld_req2.sq_first_pte.lo = *ptbl;
793
794 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
795 ofld_req2.cq_first_pte.hi = *ptbl++;
796 ofld_req2.cq_first_pte.lo = *ptbl;
797
798 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
799 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
800
801 ofld_req2.num_additional_wqes = 1;
802 memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
803 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
804 ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
805 ofld_req3[0].qp_first_pte[0].lo = *ptbl;
806
807 kwqe_arr[2] = (struct kwqe *) ofld_req3;
808 /* needed if we decide to go with multiple KCQEs per conn */
809 num_kwqes += 1;
810
811 if (hba->cnic && hba->cnic->submit_kwqes)
812 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
813}
814
815/**
816 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
817 *
818 * @hba: adapter structure pointer
819 * @ep: endpoint (transport identifier) structure
820 *
821 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
822 */
823void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
824{
825 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
826 bnx2i_5771x_send_conn_ofld_req(hba, ep);
827 else
828 bnx2i_570x_send_conn_ofld_req(hba, ep);
829}
830
831
832/**
833 * setup_qp_page_tables - iscsi QP page table setup function
834 * @ep: endpoint (transport identifier) structure
835 *
836 * Sets up page tables for SQ/RQ/CQ. 1Gbps (5706/5708/5709) devices require
837 * 64-bit addresses in big endian format, whereas 10Gbps (57710) devices
838 * require the page table in little endian format
839 */
840static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
841{
842 int num_pages;
843 u32 *ptbl;
844 dma_addr_t page;
845 int cnic_dev_10g;
846
847 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
848 cnic_dev_10g = 1;
849 else
850 cnic_dev_10g = 0;
851
852 /* SQ page table */
853 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
854 num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
855 page = ep->qp.sq_phys;
856
857 if (cnic_dev_10g)
858 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
859 else
860 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
861 while (num_pages--) {
862 if (cnic_dev_10g) {
863 /* PTE is written in little endian format for 57710 */
864 *ptbl = (u32) page;
865 ptbl++;
866 *ptbl = (u32) ((u64) page >> 32);
867 ptbl++;
868 page += PAGE_SIZE;
869 } else {
870 /* PTE is written in big endian format for
871 * 5706/5708/5709 devices */
872 *ptbl = (u32) ((u64) page >> 32);
873 ptbl++;
874 *ptbl = (u32) page;
875 ptbl++;
876 page += PAGE_SIZE;
877 }
878 }
879
880 /* RQ page table */
881 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
882 num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
883 page = ep->qp.rq_phys;
884
885 if (cnic_dev_10g)
886 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
887 else
888 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
889 while (num_pages--) {
890 if (cnic_dev_10g) {
891 /* PTE is written in little endian format for 57710 */
892 *ptbl = (u32) page;
893 ptbl++;
894 *ptbl = (u32) ((u64) page >> 32);
895 ptbl++;
896 page += PAGE_SIZE;
897 } else {
898 /* PTE is written in big endian format for
899 * 5706/5708/5709 devices */
900 *ptbl = (u32) ((u64) page >> 32);
901 ptbl++;
902 *ptbl = (u32) page;
903 ptbl++;
904 page += PAGE_SIZE;
905 }
906 }
907
908 /* CQ page table */
909 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
910 num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
911 page = ep->qp.cq_phys;
912
913 if (cnic_dev_10g)
914 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
915 else
916 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
917 while (num_pages--) {
918 if (cnic_dev_10g) {
919 /* PTE is written in little endian format for 57710 */
920 *ptbl = (u32) page;
921 ptbl++;
922 *ptbl = (u32) ((u64) page >> 32);
923 ptbl++;
924 page += PAGE_SIZE;
925 } else {
926 /* PTE is written in big endian format for
927 * 5706/5708/5709 devices */
928 *ptbl = (u32) ((u64) page >> 32);
929 ptbl++;
930 *ptbl = (u32) page;
931 ptbl++;
932 page += PAGE_SIZE;
933 }
934 }
935}
936
937
938/**
939 * bnx2i_alloc_qp_resc - allocates required resources for QP.
940 * @hba: adapter structure pointer
941 * @ep: endpoint (transport identifier) structure
942 *
943 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
944 * memory for SQ/RQ/CQ and page tables. EP structure elements such
945 * as producer/consumer indexes/pointers, queue sizes and page table
946 * contents are set up
947 */
948int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
949{
950 struct bnx2i_5771x_cq_db *cq_db;
951
952 ep->hba = hba;
953 ep->conn = NULL;
954 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
955
956 /* Allocate page table memory for SQ which is page aligned */
957 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
958 ep->qp.sq_mem_size =
959 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
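	/* the page table needs one entry per queue page and is itself
	 * rounded up to a whole page */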
960 ep->qp.sq_pgtbl_size =
961 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
962 ep->qp.sq_pgtbl_size =
963 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
964
965 ep->qp.sq_pgtbl_virt =
966 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
967 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
968 if (!ep->qp.sq_pgtbl_virt) {
969 printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
970 ep->qp.sq_pgtbl_size);
971 goto mem_alloc_err;
972 }
973
974 /* Allocate memory area for actual SQ element */
975 ep->qp.sq_virt =
976 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
977 &ep->qp.sq_phys, GFP_KERNEL);
978 if (!ep->qp.sq_virt) {
979 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
980 ep->qp.sq_mem_size);
981 goto mem_alloc_err;
982 }
983
984 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
985 ep->qp.sq_first_qe = ep->qp.sq_virt;
986 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
987 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
988 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
989 ep->qp.sq_prod_idx = 0;
990 ep->qp.sq_cons_idx = 0;
991 ep->qp.sqe_left = hba->max_sqes;
992
993 /* Allocate page table memory for CQ which is page aligned */
994 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
995 ep->qp.cq_mem_size =
996 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
997 ep->qp.cq_pgtbl_size =
998 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
999 ep->qp.cq_pgtbl_size =
1000 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1001
1002 ep->qp.cq_pgtbl_virt =
1003 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1004 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1005 if (!ep->qp.cq_pgtbl_virt) {
1006 printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
1007 ep->qp.cq_pgtbl_size);
1008 goto mem_alloc_err;
1009 }
1010
1011 /* Allocate memory area for actual CQ element */
1012 ep->qp.cq_virt =
1013 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1014 &ep->qp.cq_phys, GFP_KERNEL);
1015 if (!ep->qp.cq_virt) {
1016 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1017 ep->qp.cq_mem_size);
1018 goto mem_alloc_err;
1019 }
1020 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1021
1022 ep->qp.cq_first_qe = ep->qp.cq_virt;
1023 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1024 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1025 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1026 ep->qp.cq_prod_idx = 0;
1027 ep->qp.cq_cons_idx = 0;
1028 ep->qp.cqe_left = hba->max_cqes;
1029 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1030 ep->qp.cqe_size = hba->max_cqes;
1031
1032 /* Invalidate all CQ doorbell sequence numbers; required only for 57710 */
1033 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1034 memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
1035
1036 /* Allocate page table memory for RQ which is page aligned */
1037 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1038 ep->qp.rq_mem_size =
1039 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1040 ep->qp.rq_pgtbl_size =
1041 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
1042 ep->qp.rq_pgtbl_size =
1043 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1044
1045 ep->qp.rq_pgtbl_virt =
1046 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1047 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1048 if (!ep->qp.rq_pgtbl_virt) {
1049 printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
1050 ep->qp.rq_pgtbl_size);
1051 goto mem_alloc_err;
1052 }
1053
1054 /* Allocate memory area for actual RQ element */
1055 ep->qp.rq_virt =
1056 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1057 &ep->qp.rq_phys, GFP_KERNEL);
1058 if (!ep->qp.rq_virt) {
1059 printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
1060 ep->qp.rq_mem_size);
1061 goto mem_alloc_err;
1062 }
1063
1064 ep->qp.rq_first_qe = ep->qp.rq_virt;
1065 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1066 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1067 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1068 ep->qp.rq_prod_idx = 0x8000;
1069 ep->qp.rq_cons_idx = 0;
1070 ep->qp.rqe_left = hba->max_rqes;
1071
1072 setup_qp_page_tables(ep);
1073
1074 return 0;
1075
1076mem_alloc_err:
1077 bnx2i_free_qp_resc(hba, ep);
1078 return -ENOMEM;
1079}
1080
1081
1082
1083/**
1084 * bnx2i_free_qp_resc - free memory resources held by QP
1085 * @hba: adapter structure pointer
1086 * @ep: endpoint (transport identifier) structure
1087 *
1088 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 */
1090void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1091{
1092 if (ep->qp.ctx_base) {
1093 iounmap(ep->qp.ctx_base);
1094 ep->qp.ctx_base = NULL;
1095 }
1096 /* Free SQ mem */
1097 if (ep->qp.sq_pgtbl_virt) {
1098 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1099 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1100 ep->qp.sq_pgtbl_virt = NULL;
1101 ep->qp.sq_pgtbl_phys = 0;
1102 }
1103 if (ep->qp.sq_virt) {
1104 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1105 ep->qp.sq_virt, ep->qp.sq_phys);
1106 ep->qp.sq_virt = NULL;
1107 ep->qp.sq_phys = 0;
1108 }
1109
1110 /* Free RQ mem */
1111 if (ep->qp.rq_pgtbl_virt) {
1112 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1113 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1114 ep->qp.rq_pgtbl_virt = NULL;
1115 ep->qp.rq_pgtbl_phys = 0;
1116 }
1117 if (ep->qp.rq_virt) {
1118 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1119 ep->qp.rq_virt, ep->qp.rq_phys);
1120 ep->qp.rq_virt = NULL;
1121 ep->qp.rq_phys = 0;
1122 }
1123
1124 /* Free CQ mem */
1125 if (ep->qp.cq_pgtbl_virt) {
1126 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1127 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1128 ep->qp.cq_pgtbl_virt = NULL;
1129 ep->qp.cq_pgtbl_phys = 0;
1130 }
1131 if (ep->qp.cq_virt) {
1132 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1133 ep->qp.cq_virt, ep->qp.cq_phys);
1134 ep->qp.cq_virt = NULL;
1135 ep->qp.cq_phys = 0;
1136 }
1137}
1138
1139
1140/**
1141 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
1142 * @hba: adapter structure pointer
1143 *
1144 * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
1145 * This results in iSCSI support validation and on-chip context manager
1146 * initialization. Firmware completes this handshake with a CQE carrying
1147 * the result of iscsi support validation. Parameters carried by the
1148 * iscsi init request determine the number of offloaded connections and the
1149 * tolerance level for iscsi protocol violations this hba/chip can support
1150 */
1151int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1152{
1153 struct kwqe *kwqe_arr[3];
1154 struct iscsi_kwqe_init1 iscsi_init;
1155 struct iscsi_kwqe_init2 iscsi_init2;
1156 int rc = 0;
1157 u64 mask64;
1158
1159 bnx2i_adjust_qp_size(hba);
1160
1161 iscsi_init.flags =
1162 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1163 if (en_tcp_dack)
1164 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1165 iscsi_init.reserved0 = 0;
1166 iscsi_init.num_cqs = 1;
1167 iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
1168 iscsi_init.hdr.flags =
1169 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1170
1171 iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
1172 iscsi_init.dummy_buffer_addr_hi =
1173 (u32) ((u64) hba->dummy_buf_dma >> 32);
1174
1175 hba->ctx_ccell_tasks =
1176 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1177 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1178 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1179 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1180 iscsi_init.sq_num_wqes = hba->max_sqes;
1181 iscsi_init.cq_log_wqes_per_page =
1182 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
1183 iscsi_init.cq_num_wqes = hba->max_cqes;
1184 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1185 (PAGE_SIZE - 1)) / PAGE_SIZE;
1186 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1187 (PAGE_SIZE - 1)) / PAGE_SIZE;
1188 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1189 iscsi_init.rq_num_wqes = hba->max_rqes;
1190
1191
1192 iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
1193 iscsi_init2.hdr.flags =
1194 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1195 iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
1196 mask64 = 0x0ULL;
1197 mask64 |= (
1198 /* CISCO MDS */
1199 (1UL <<
1200 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
1201 /* HP MSA1510i */
1202 (1UL <<
1203 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1204 /* EMC */
1205 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
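	/* violations present in this mask are reported as warnings rather
	 * than errors by bnx2i_process_iscsi_error(), avoiding needless
	 * session recovery against the targets noted above */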
1206 if (error_mask1)
1207 iscsi_init2.error_bit_map[0] = error_mask1;
1208 else
1209 iscsi_init2.error_bit_map[0] = (u32) mask64;
1210
1211 if (error_mask2)
1212 iscsi_init2.error_bit_map[1] = error_mask2;
1213 else
1214 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1215
1216 iscsi_error_mask = mask64;
1217
1218 kwqe_arr[0] = (struct kwqe *) &iscsi_init;
1219 kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
1220
1221 if (hba->cnic && hba->cnic->submit_kwqes)
1222 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
1223 return rc;
1224}
1225
1226
1227/**
1228 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
1229 * @session: iscsi session pointer
 * @bnx2i_conn: bnx2i connection pointer
1230 * @cqe: pointer to newly DMA'ed CQE entry for processing
1231 *
1232 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1233 */
1234static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1235 struct bnx2i_conn *bnx2i_conn,
1236 struct cqe *cqe)
1237{
1238 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1239 struct bnx2i_cmd_response *resp_cqe;
1240 struct bnx2i_cmd *bnx2i_cmd;
1241 struct iscsi_task *task;
1242 struct iscsi_cmd_rsp *hdr;
1243 u32 datalen = 0;
1244
1245 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1246 spin_lock(&session->lock);
1247 task = iscsi_itt_to_task(conn,
1248 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1249 if (!task)
1250 goto fail;
1251
1252 bnx2i_cmd = task->dd_data;
1253
1254 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1255 conn->datain_pdus_cnt +=
1256 resp_cqe->task_stat.read_stat.num_data_outs;
1257 conn->rxdata_octets +=
1258 bnx2i_cmd->req.total_data_transfer_length;
1259 } else {
1260 conn->dataout_pdus_cnt +=
1261 resp_cqe->task_stat.read_stat.num_data_outs;
1262 conn->r2t_pdus_cnt +=
1263 resp_cqe->task_stat.read_stat.num_r2ts;
1264 conn->txdata_octets +=
1265 bnx2i_cmd->req.total_data_transfer_length;
1266 }
1267 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1268
1269 hdr = (struct iscsi_cmd_rsp *)task->hdr;
1270 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1271 hdr->opcode = resp_cqe->op_code;
1272 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
1273 hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
1274 hdr->response = resp_cqe->response;
1275 hdr->cmd_status = resp_cqe->status;
1276 hdr->flags = resp_cqe->response_flags;
1277 hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
1278
1279 if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
1280 goto done;
1281
1282 if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
1283 datalen = resp_cqe->data_length;
1284 if (datalen < 2)
1285 goto done;
1286
1287 if (datalen > BNX2I_RQ_WQE_SIZE) {
1288 iscsi_conn_printk(KERN_ERR, conn,
1289 "sense data len %d > RQ sz\n",
1290 datalen);
1291 datalen = BNX2I_RQ_WQE_SIZE;
1292 } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
1293 iscsi_conn_printk(KERN_ERR, conn,
1294 "sense data len %d > conn data\n",
1295 datalen);
1296 datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
1297 }
1298
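		/* sense data for a CHECK CONDITION arrives via the RQ; copy it
		 * into conn->data so it can be handed to libiscsi below */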
1299 bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
1300 bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
1301 }
1302
1303done:
1304 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1305 conn->data, datalen);
1306fail:
1307 spin_unlock(&session->lock);
1308 return 0;
1309}
1310
1311
1312/**
1313 * bnx2i_process_login_resp - this function handles iscsi login response
1314 * @session: iscsi session pointer
1315 * @bnx2i_conn: iscsi connection pointer
1316 * @cqe: pointer to newly DMA'ed CQE entry for processing
1317 *
1318 * process Login Response CQE & complete it to open-iscsi user daemon
1319 */
1320static int bnx2i_process_login_resp(struct iscsi_session *session,
1321 struct bnx2i_conn *bnx2i_conn,
1322 struct cqe *cqe)
1323{
1324 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1325 struct iscsi_task *task;
1326 struct bnx2i_login_response *login;
1327 struct iscsi_login_rsp *resp_hdr;
1328 int pld_len;
1329 int pad_len;
1330
1331 login = (struct bnx2i_login_response *) cqe;
1332 spin_lock(&session->lock);
1333 task = iscsi_itt_to_task(conn,
1334 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1335 if (!task)
1336 goto done;
1337
1338 resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1339 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1340 resp_hdr->opcode = login->op_code;
1341 resp_hdr->flags = login->response_flags;
1342 resp_hdr->max_version = login->version_max;
1343 resp_hdr->active_version = login->version_active;
1344 resp_hdr->hlength = 0;
1345
1346 hton24(resp_hdr->dlength, login->data_length);
1347 memcpy(resp_hdr->isid, &login->isid_lo, 6);
1348 resp_hdr->tsih = cpu_to_be16(login->tsih);
1349 resp_hdr->itt = task->hdr->itt;
1350 resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1351 resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1352 resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1353 resp_hdr->status_class = login->status_class;
1354 resp_hdr->status_detail = login->status_detail;
1355 pld_len = login->data_length;
1356 bnx2i_conn->gen_pdu.resp_wr_ptr =
1357 bnx2i_conn->gen_pdu.resp_buf + pld_len;
1358
1359 pad_len = 0;
1360 if (pld_len & 0x3)
1361 pad_len = 4 - (pld_len % 4);
1362
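	/* the login payload is padded to a 4-byte boundary; zero the pad
	 * bytes and include them in the length passed to
	 * __iscsi_complete_pdu() */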
1363 if (pad_len) {
1364 int i = 0;
1365 for (i = 0; i < pad_len; i++) {
1366 bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1367 bnx2i_conn->gen_pdu.resp_wr_ptr++;
1368 }
1369 }
1370
1371 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1372 bnx2i_conn->gen_pdu.resp_buf,
1373 bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1374done:
1375 spin_unlock(&session->lock);
1376 return 0;
1377}
1378
1379/**
1380 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1381 * @session: iscsi session pointer
1382 * @bnx2i_conn: iscsi connection pointer
1383 * @cqe: pointer to newly DMA'ed CQE entry for processing
1384 *
1385 * process iSCSI TMF Response CQE and wake up the driver eh thread.
1386 */
1387static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1388 struct bnx2i_conn *bnx2i_conn,
1389 struct cqe *cqe)
1390{
1391 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1392 struct iscsi_task *task;
1393 struct bnx2i_tmf_response *tmf_cqe;
1394 struct iscsi_tm_rsp *resp_hdr;
1395
1396 tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1397 spin_lock(&session->lock);
1398 task = iscsi_itt_to_task(conn,
1399 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1400 if (!task)
1401 goto done;
1402
1403 resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1404 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1405 resp_hdr->opcode = tmf_cqe->op_code;
1406 resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1407 resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1408 resp_hdr->itt = task->hdr->itt;
1409 resp_hdr->response = tmf_cqe->response;
1410
1411 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1412done:
1413 spin_unlock(&session->lock);
1414 return 0;
1415}
1416
1417/**
1418 * bnx2i_process_logout_resp - this function handles iscsi logout response
1419 * @session: iscsi session pointer
1420 * @bnx2i_conn: iscsi connection pointer
1421 * @cqe: pointer to newly DMA'ed CQE entry for processing
1422 *
1423 * process iSCSI Logout Response CQE & make function call to
1424 * notify the user daemon.
1425 */
1426static int bnx2i_process_logout_resp(struct iscsi_session *session,
1427 struct bnx2i_conn *bnx2i_conn,
1428 struct cqe *cqe)
1429{
1430 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1431 struct iscsi_task *task;
1432 struct bnx2i_logout_response *logout;
1433 struct iscsi_logout_rsp *resp_hdr;
1434
1435 logout = (struct bnx2i_logout_response *) cqe;
1436 spin_lock(&session->lock);
1437 task = iscsi_itt_to_task(conn,
1438 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1439 if (!task)
1440 goto done;
1441
1442 resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1443 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1444 resp_hdr->opcode = logout->op_code;
1445 resp_hdr->flags = logout->response;
1446 resp_hdr->hlength = 0;
1447
1448 resp_hdr->itt = task->hdr->itt;
1449 resp_hdr->statsn = task->hdr->exp_statsn;
1450 resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1451 resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1452
1453 resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1454 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1455
1456 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1457done:
1458 spin_unlock(&session->lock);
1459 return 0;
1460}
1461
1462/**
1463 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1464 * @session: iscsi session pointer
1465 * @bnx2i_conn: iscsi connection pointer
1466 * @cqe: pointer to newly DMA'ed CQE entry for processing
1467 *
1468 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
1469 */
1470static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1471 struct bnx2i_conn *bnx2i_conn,
1472 struct cqe *cqe)
1473{
1474 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1475 struct bnx2i_nop_in_msg *nop_in;
1476 struct iscsi_task *task;
1477
1478 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1479 spin_lock(&session->lock);
1480 task = iscsi_itt_to_task(conn,
1481 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1482 if (task)
1483 iscsi_put_task(task);
1484 spin_unlock(&session->lock);
1485}
1486
1487/**
1488 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
1489 * @bnx2i_conn: iscsi connection pointer
1490 *
1491 * Firmware advances RQ producer index for every unsolicited PDU even if
1492 * payload data length is '0'. This function makes corresponding
1493 * adjustments on the driver side to match this f/w behavior
1494 */
1495static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
1496{
1497 char dummy_rq_data[2];
1498 bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
1499 bnx2i_put_rq_buf(bnx2i_conn, 1);
1500}
1501
1502
1503/**
1504 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
1505 * @session: iscsi session pointer
1506 * @bnx2i_conn: iscsi connection pointer
1507 * @cqe: pointer to newly DMA'ed CQE entry for processing
1508 *
1509 * process iSCSI target's proactive iSCSI NOPIN request
1510 */
1511static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1512 struct bnx2i_conn *bnx2i_conn,
1513 struct cqe *cqe)
1514{
1515 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1516 struct iscsi_task *task;
1517 struct bnx2i_nop_in_msg *nop_in;
1518 struct iscsi_nopin *hdr;
1519 u32 itt;
1520 int tgt_async_nop = 0;
1521
1522 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1523 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1524
1525 spin_lock(&session->lock);
1526 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
1527 memset(hdr, 0, sizeof(struct iscsi_hdr));
1528 hdr->opcode = nop_in->op_code;
1529 hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
1530 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1531 hdr->ttt = cpu_to_be32(nop_in->ttt);
1532
1533 if (itt == (u16) RESERVED_ITT) {
1534 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1535 hdr->itt = RESERVED_ITT;
1536 tgt_async_nop = 1;
1537 goto done;
1538 }
1539
1540 /* this is a response to one of our nop-outs */
1541 task = iscsi_itt_to_task(conn, itt);
1542 if (task) {
1543 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 hdr->itt = task->hdr->itt;
1545 hdr->ttt = cpu_to_be32(nop_in->ttt);
1546 memcpy(hdr->lun, nop_in->lun, 8);
1547 }
1548done:
1549 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1550 spin_unlock(&session->lock);
1551
1552 return tgt_async_nop;
1553}
1554
1555
1556/**
1557 * bnx2i_process_async_mesg - this function handles iscsi async message
1558 * @session: iscsi session pointer
1559 * @bnx2i_conn: iscsi connection pointer
1560 * @cqe: pointer to newly DMA'ed CQE entry for processing
1561 *
1562 * process iSCSI ASYNC Message
1563 */
1564static void bnx2i_process_async_mesg(struct iscsi_session *session,
1565 struct bnx2i_conn *bnx2i_conn,
1566 struct cqe *cqe)
1567{
1568 struct bnx2i_async_msg *async_cqe;
1569 struct iscsi_async *resp_hdr;
1570 u8 async_event;
1571
1572 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1573
1574 async_cqe = (struct bnx2i_async_msg *)cqe;
1575 async_event = async_cqe->async_event;
1576
1577 if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
1578 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1579 "async: scsi events not supported\n");
1580 return;
1581 }
1582
1583 spin_lock(&session->lock);
1584 resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
1585 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1586 resp_hdr->opcode = async_cqe->op_code;
1587 resp_hdr->flags = 0x80;
1588
1589 memcpy(resp_hdr->lun, async_cqe->lun, 8);
1590 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1591 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1592
1593 resp_hdr->async_event = async_cqe->async_event;
1594 resp_hdr->async_vcode = async_cqe->async_vcode;
1595
1596 resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
1597 resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
1598 resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
1599
1600 __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
1601 (struct iscsi_hdr *)resp_hdr, NULL, 0);
1602 spin_unlock(&session->lock);
1603}
1604
1605
1606/**
1607 * bnx2i_process_reject_mesg - process iscsi reject pdu
1608 * @session: iscsi session pointer
1609 * @bnx2i_conn: iscsi connection pointer
1610 * @cqe: pointer to newly DMA'ed CQE entry for processing
1611 *
1612 * process iSCSI REJECT message
1613 */
1614static void bnx2i_process_reject_mesg(struct iscsi_session *session,
1615 struct bnx2i_conn *bnx2i_conn,
1616 struct cqe *cqe)
1617{
1618 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1619 struct bnx2i_reject_msg *reject;
1620 struct iscsi_reject *hdr;
1621
1622 reject = (struct bnx2i_reject_msg *) cqe;
1623 if (reject->data_length) {
1624 bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
1625 bnx2i_put_rq_buf(bnx2i_conn, 1);
1626 } else
1627 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1628
1629 spin_lock(&session->lock);
1630 hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
1631 memset(hdr, 0, sizeof(struct iscsi_hdr));
1632 hdr->opcode = reject->op_code;
1633 hdr->reason = reject->reason;
1634 hton24(hdr->dlength, reject->data_length);
1635 hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
1636 hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
1637 hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
1638 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
1639 reject->data_length);
1640 spin_unlock(&session->lock);
1641}
1642
1643/**
1644 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
1645 * @session: iscsi session pointer
1646 * @bnx2i_conn: iscsi connection pointer
1647 * @cqe: pointer to newly DMA'ed CQE entry for processing
1648 *
1649 * process command cleanup response CQE during conn shutdown or error recovery
1650 */
1651static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1652 struct bnx2i_conn *bnx2i_conn,
1653 struct cqe *cqe)
1654{
1655 struct bnx2i_cleanup_response *cmd_clean_rsp;
1656 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1657 struct iscsi_task *task;
1658
1659 cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
1660 spin_lock(&session->lock);
1661 task = iscsi_itt_to_task(conn,
1662 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1663 if (!task)
1664 printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
1665 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1666 spin_unlock(&session->lock);
1667 complete(&bnx2i_conn->cmd_cleanup_cmpl);
1668}
1669
1670
1671
1672/**
1673 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
1674 * @bnx2i_conn: iscsi connection
1675 *
1676 * this function is called by generic KCQ handler to process all pending CQE's
1677 */
1678static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1679{
1680 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1681 struct iscsi_session *session = conn->session;
1682 struct qp_info *qp = &bnx2i_conn->ep->qp;
1683 struct bnx2i_nop_in_msg *nopin;
1684 int tgt_async_msg;
1685
1686 while (1) {
1687 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1688 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1689 break;
1690
1691 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
1692 break;
1693
1694 tgt_async_msg = 0;
1695
1696 switch (nopin->op_code) {
1697 case ISCSI_OP_SCSI_CMD_RSP:
1698 case ISCSI_OP_SCSI_DATA_IN:
1699 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
1700 qp->cq_cons_qe);
1701 break;
1702 case ISCSI_OP_LOGIN_RSP:
1703 bnx2i_process_login_resp(session, bnx2i_conn,
1704 qp->cq_cons_qe);
1705 break;
1706 case ISCSI_OP_SCSI_TMFUNC_RSP:
1707 bnx2i_process_tmf_resp(session, bnx2i_conn,
1708 qp->cq_cons_qe);
1709 break;
1710 case ISCSI_OP_LOGOUT_RSP:
1711 bnx2i_process_logout_resp(session, bnx2i_conn,
1712 qp->cq_cons_qe);
1713 break;
1714 case ISCSI_OP_NOOP_IN:
1715 if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
1716 qp->cq_cons_qe))
1717 tgt_async_msg = 1;
1718 break;
1719 case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
1720 bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
1721 qp->cq_cons_qe);
1722 break;
1723 case ISCSI_OP_ASYNC_EVENT:
1724 bnx2i_process_async_mesg(session, bnx2i_conn,
1725 qp->cq_cons_qe);
1726 tgt_async_msg = 1;
1727 break;
1728 case ISCSI_OP_REJECT:
1729 bnx2i_process_reject_mesg(session, bnx2i_conn,
1730 qp->cq_cons_qe);
1731 break;
1732 case ISCSI_OPCODE_CLEANUP_RESPONSE:
1733 bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
1734 qp->cq_cons_qe);
1735 break;
1736 default:
1737 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1738 nopin->op_code);
1739 }
1740
1741 if (!tgt_async_msg)
1742 bnx2i_conn->ep->num_active_cmds--;
1743
1744 /* clear the opcode in production versions only; keep the field
1745 * intact through beta since it helps debugging (context dump)
1746 * nopin->op_code = 0;
1747 */
1748 qp->cqe_exp_seq_sn++;
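		/* cq_req_sn runs from ISCSI_INITIAL_SN up to twice the CQ size
		 * (the max_cq_sqn advertised in iscsi_init2) before wrapping */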
1749 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1750 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1751
1752 if (qp->cq_cons_qe == qp->cq_last_qe) {
1753 qp->cq_cons_qe = qp->cq_first_qe;
1754 qp->cq_cons_idx = 0;
1755 } else {
1756 qp->cq_cons_qe++;
1757 qp->cq_cons_idx++;
1758 }
1759 }
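	/* re-arm CQ event coalescing so the firmware raises a new fastpath
	 * notification when further CQEs are DMA'ed */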
1760 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1761}
1762
1763/**
1764 * bnx2i_fastpath_notification - process global event queue (KCQ)
1765 * @hba: adapter structure pointer
1766 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
1767 *
1768 * Fast path event notification handler, KCQ entry carries context id
1769 * of the connection that has 1 or more pending CQ entries
1770 */
1771static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1772 struct iscsi_kcqe *new_cqe_kcqe)
1773{
1774 struct bnx2i_conn *conn;
1775 u32 iscsi_cid;
1776
1777 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1778 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1779
1780 if (!conn) {
1781 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1782 return;
1783 }
1784 if (!conn->ep) {
1785 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1786 return;
1787 }
1788
1789 bnx2i_process_new_cqes(conn);
1790}
1791
1792
1793/**
1794 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
1795 * @hba: adapter structure pointer
1796 * @update_kcqe: kcqe pointer
1797 *
1798 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
1799 */
1800static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
1801 struct iscsi_kcqe *update_kcqe)
1802{
1803 struct bnx2i_conn *conn;
1804 u32 iscsi_cid;
1805
1806 iscsi_cid = update_kcqe->iscsi_conn_id;
1807 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1808
1809 if (!conn) {
1810 printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
1811 return;
1812 }
1813 if (!conn->ep) {
1814 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
1815 return;
1816 }
1817
1818 if (update_kcqe->completion_status) {
1819 printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
1820 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
1821 } else
1822 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
1823
1824 wake_up_interruptible(&conn->ep->ofld_wait);
1825}
1826
1827
1828/**
1829 * bnx2i_recovery_que_add_conn - add connection to recovery queue
1830 * @hba: adapter structure pointer
1831 * @bnx2i_conn: iscsi connection
1832 *
1833 * Add connection to recovery queue and schedule adapter eh worker
1834 */
1835static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
1836 struct bnx2i_conn *bnx2i_conn)
1837{
1838 iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
1839 ISCSI_ERR_CONN_FAILED);
1840}
1841
1842
1843/**
1844 * bnx2i_process_tcp_error - process error notification on a given connection
1845 *
1846 * @hba: adapter structure pointer
1847 * @tcp_err: tcp error kcqe pointer
1848 *
1849 * handles tcp level error notifications from FW.
1850 */
1851static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
1852 struct iscsi_kcqe *tcp_err)
1853{
1854 struct bnx2i_conn *bnx2i_conn;
1855 u32 iscsi_cid;
1856
1857 iscsi_cid = tcp_err->iscsi_conn_id;
1858 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1859
1860 if (!bnx2i_conn) {
1861 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1862 return;
1863 }
1864
1865 printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
1866 iscsi_cid, tcp_err->completion_status);
1867 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
1868}
1869
1870
1871/**
1872 * bnx2i_process_iscsi_error - process error notification on a given connection
1873 * @hba: adapter structure pointer
1874 * @iscsi_err: iscsi error kcqe pointer
1875 *
1876 * handles iscsi error notifications from the FW. Firmware, based on the
1877 * error bit map set up during the initial handshake, classifies an iscsi
1878 * protocol / TCP RFC violation as either a warning or an error indication.
1879 * If the indication is of "Error" type, the driver initiates session
1880 * recovery for that connection/session. For a "Warning" type indication,
1881 * the driver puts out a system log message (only one message per violation
1882 * type for the life of the session, to avoid unnecessarily overloading the system)
1883 */
1884static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
1885 struct iscsi_kcqe *iscsi_err)
1886{
1887 struct bnx2i_conn *bnx2i_conn;
1888 u32 iscsi_cid;
1889 char warn_notice[] = "iscsi_warning";
1890 char error_notice[] = "iscsi_error";
1891 char additional_notice[64];
1892 char *message;
1893 int need_recovery;
1894 u64 err_mask64;
1895
1896 iscsi_cid = iscsi_err->iscsi_conn_id;
1897 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1898 if (!bnx2i_conn) {
1899 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1900 return;
1901 }
1902
1903 err_mask64 = (0x1ULL << iscsi_err->completion_status);
1904
1905 if (err_mask64 & iscsi_error_mask) {
1906 need_recovery = 0;
1907 message = warn_notice;
1908 } else {
1909 need_recovery = 1;
1910 message = error_notice;
1911 }
1912
1913 switch (iscsi_err->completion_status) {
1914 case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
1915 strcpy(additional_notice, "hdr digest err");
1916 break;
1917 case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
1918 strcpy(additional_notice, "data digest err");
1919 break;
1920 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
1921 strcpy(additional_notice, "wrong opcode rcvd");
1922 break;
1923 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
1924 strcpy(additional_notice, "AHS len > 0 rcvd");
1925 break;
1926 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
1927 strcpy(additional_notice, "invalid ITT rcvd");
1928 break;
1929 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
1930 strcpy(additional_notice, "wrong StatSN rcvd");
1931 break;
1932 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
1933 strcpy(additional_notice, "wrong DataSN rcvd");
1934 break;
1935 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
1936 strcpy(additional_notice, "pend R2T violation");
1937 break;
1938 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
1939 strcpy(additional_notice, "ERL0, UO");
1940 break;
1941 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
1942 strcpy(additional_notice, "ERL0, U1");
1943 break;
1944 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
1945 strcpy(additional_notice, "ERL0, U2");
1946 break;
1947 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
1948 strcpy(additional_notice, "ERL0, U3");
1949 break;
1950 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
1951 strcpy(additional_notice, "ERL0, U4");
1952 break;
1953 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
1954 strcpy(additional_notice, "ERL0, U5");
1955 break;
1956 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
1957 strcpy(additional_notice, "ERL0, U6");
1958 break;
1959 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
1960 strcpy(additional_notice, "invalid resi len");
1961 break;
1962 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
1963 strcpy(additional_notice, "MRDSL violation");
1964 break;
1965 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
1966 strcpy(additional_notice, "F-bit not set");
1967 break;
1968 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
1969 strcpy(additional_notice, "invalid TTT");
1970 break;
1971 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
1972 strcpy(additional_notice, "invalid DataSN");
1973 break;
1974 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
1975 strcpy(additional_notice, "burst len violation");
1976 break;
1977 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
1978 strcpy(additional_notice, "buf offset violation");
1979 break;
1980 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
1981 strcpy(additional_notice, "invalid LUN field");
1982 break;
1983 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
1984 strcpy(additional_notice, "invalid R2TSN field");
1985 break;
1986#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
1987 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
1988 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
1989 strcpy(additional_notice, "invalid cmd len1");
1990 break;
1991#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
1992 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
1993 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
1994 strcpy(additional_notice, "invalid cmd len2");
1995 break;
1996 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
1997 strcpy(additional_notice,
1998 "pend r2t exceeds MaxOutstandingR2T value");
1999 break;
2000 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2001 strcpy(additional_notice, "TTT is rsvd");
2002 break;
2003 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2004 strcpy(additional_notice, "MBL violation");
2005 break;
2006#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
2007 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2008 case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2009 strcpy(additional_notice, "data seg len != 0");
2010 break;
2011 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2012 strcpy(additional_notice, "reject pdu len error");
2013 break;
2014 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2015 strcpy(additional_notice, "async pdu len error");
2016 break;
2017 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2018 strcpy(additional_notice, "nopin pdu len error");
2019 break;
2020#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
2021 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2022 case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2023 strcpy(additional_notice, "pend r2t in cleanup");
2024 break;
2025
2026 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2027 strcpy(additional_notice, "IP fragments rcvd");
2028 break;
2029 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2030 strcpy(additional_notice, "IP options error");
2031 break;
2032 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2033 strcpy(additional_notice, "urgent flag error");
2034 break;
2035 default:
2036 printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2037 iscsi_err->completion_status);
2038 }
2039
2040 if (need_recovery) {
2041 iscsi_conn_printk(KERN_ALERT,
2042 bnx2i_conn->cls_conn->dd_data,
2043 "bnx2i: %s - %s\n",
2044 message, additional_notice);
2045
2046 iscsi_conn_printk(KERN_ALERT,
2047 bnx2i_conn->cls_conn->dd_data,
2048 "conn_err - hostno %d conn %p, "
2049 "iscsi_cid %x cid %x\n",
2050 bnx2i_conn->hba->shost->host_no,
2051 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2052 bnx2i_conn->ep->ep_cid);
2053 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2054 } else
2055 if (!test_and_set_bit(iscsi_err->completion_status,
2056 (void *) &bnx2i_conn->violation_notified))
2057 iscsi_conn_printk(KERN_ALERT,
2058 bnx2i_conn->cls_conn->dd_data,
2059 "bnx2i: %s - %s\n",
2060 message, additional_notice);
2061}
2062
2063
2064/**
2065 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2066 * @hba: adapter structure pointer
2067 * @conn_destroy: conn destroy kcqe pointer
2068 *
2069 * handles connection destroy completion request.
2070 */
2071static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2072 struct iscsi_kcqe *conn_destroy)
2073{
2074 struct bnx2i_endpoint *ep;
2075
2076 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2077 if (!ep) {
2078 printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2079 "offload request, unexpected completion\n");
2080 return;
2081 }
2082
2083 if (hba != ep->hba) {
2084 printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2085 return;
2086 }
2087
2088 if (conn_destroy->completion_status) {
2089 printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2090 ep->state = EP_STATE_CLEANUP_FAILED;
2091 } else
2092 ep->state = EP_STATE_CLEANUP_CMPL;
2093 wake_up_interruptible(&ep->ofld_wait);
2094}
2095
2096
2097/**
2098 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2099 * @hba: adapter structure pointer
2100 * @ofld_kcqe: conn offload kcqe pointer
2101 *
2102 * handles initial connection offload completion, ep_connect() thread is
2103 * woken-up to continue with LLP connect process
2104 */
2105static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2106 struct iscsi_kcqe *ofld_kcqe)
2107{
2108 u32 cid_addr;
2109 struct bnx2i_endpoint *ep;
2110 u32 cid_num;
2111
2112 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2113 if (!ep) {
2114 printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2115 return;
2116 }
2117
2118 if (hba != ep->hba) {
2119 printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2120 return;
2121 }
2122
2123 if (ofld_kcqe->completion_status) {
2124 if (ofld_kcqe->completion_status ==
2125 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2126 printk(KERN_ALERT "bnx2i: unable to allocate"
2127 " iSCSI context resources\n");
2128 ep->state = EP_STATE_OFLD_FAILED;
2129 } else {
2130 ep->state = EP_STATE_OFLD_COMPL;
2131 cid_addr = ofld_kcqe->iscsi_conn_context_id;
2132 cid_num = bnx2i_get_cid_num(ep);
2133 ep->ep_cid = cid_addr;
2134 ep->qp.ctx_base = NULL;
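		/* the doorbell registers themselves are mapped later, in
		 * bnx2i_map_ep_dbell_regs() */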
2135 }
2136 wake_up_interruptible(&ep->ofld_wait);
2137}
2138
2139/**
2140 * bnx2i_indicate_kcqe - process KCQ notifications from the cnic driver
2141 * @context: adapter structure pointer
2142 * @kcqe: array of pending kcqe pointers
 * @num_cqe: number of kcqes to process
2143 *
2144 * Generic KCQ event handler/dispatcher
2145 */
2146static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2147 u32 num_cqe)
2148{
2149 struct bnx2i_hba *hba = context;
2150 int i = 0;
2151 struct iscsi_kcqe *ikcqe = NULL;
2152
2153 while (i < num_cqe) {
2154 ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2155
2156 if (ikcqe->op_code ==
2157 ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2158 bnx2i_fastpath_notification(hba, ikcqe);
2159 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2160 bnx2i_process_ofld_cmpl(hba, ikcqe);
2161 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2162 bnx2i_process_update_conn_cmpl(hba, ikcqe);
2163 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2164 if (ikcqe->completion_status !=
2165 ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2166 bnx2i_iscsi_license_error(hba, ikcqe->\
2167 completion_status);
2168 else {
2169 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2170 bnx2i_get_link_state(hba);
2171 printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2172 "ISCSI_INIT passed\n",
2173 (u8)hba->pcidev->bus->number,
2174 hba->pci_devno,
2175 (u8)hba->pci_func);
2176
2177
2178 }
2179 } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2180 bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2181 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2182 bnx2i_process_iscsi_error(hba, ikcqe);
2183 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2184 bnx2i_process_tcp_error(hba, ikcqe);
2185 else
2186 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2187 ikcqe->op_code);
2188 }
2189}
2190
2191
2192/**
2193 * bnx2i_indicate_netevent - Generic netdev event handler
2194 * @context: adapter structure pointer
2195 * @event: event type
2196 *
2197 * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
2198 * NETDEV_GOING_DOWN and NETDEV_CHANGE
2199 */
2200static void bnx2i_indicate_netevent(void *context, unsigned long event)
2201{
2202 struct bnx2i_hba *hba = context;
2203
2204 switch (event) {
2205 case NETDEV_UP:
2206 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2207 bnx2i_send_fw_iscsi_init_msg(hba);
2208 break;
2209 case NETDEV_DOWN:
2210 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2211 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2212 break;
2213 case NETDEV_GOING_DOWN:
2214 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2215 iscsi_host_for_each_session(hba->shost,
2216 bnx2i_drop_session);
2217 break;
2218 case NETDEV_CHANGE:
2219 bnx2i_get_link_state(hba);
2220 break;
2221 default:
2222 ;
2223 }
2224}
2225
2226
2227/**
2228 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2229 * @cm_sk: cnic sock structure pointer
2230 *
2231 * function callback exported via bnx2i - cnic driver interface to
2232 * indicate completion of option-2 TCP connect request.
2233 */
2234static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2235{
2236 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2237
2238 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2239 ep->state = EP_STATE_CONNECT_FAILED;
2240 else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2241 ep->state = EP_STATE_CONNECT_COMPL;
2242 else
2243 ep->state = EP_STATE_CONNECT_FAILED;
2244
2245 wake_up_interruptible(&ep->ofld_wait);
2246}
2247
2248
2249/**
2250 * bnx2i_cm_close_cmpl - process tcp conn close completion
2251 * @cm_sk: cnic sock structure pointer
2252 *
2253 * function callback exported via bnx2i - cnic driver interface to
2254 * indicate completion of option-2 graceful TCP connect shutdown
2255 */
2256static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2257{
2258 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2259
2260 ep->state = EP_STATE_DISCONN_COMPL;
2261 wake_up_interruptible(&ep->ofld_wait);
2262}
2263
2264
2265/**
2266 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2267 * @cm_sk: cnic sock structure pointer
2268 *
2269 * function callback exported via bnx2i - cnic driver interface to
2270 * indicate completion of option-2 abortive TCP connect termination
2271 */
2272static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2273{
2274 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2275
2276 ep->state = EP_STATE_DISCONN_COMPL;
2277 wake_up_interruptible(&ep->ofld_wait);
2278}
2279
2280
2281/**
2282 * bnx2i_cm_remote_close - process received TCP FIN
2283 * @cm_sk: cnic sock structure pointer
2285 *
2286 * function callback exported via bnx2i - cnic driver interface to indicate
2287 * async TCP events such as FIN
2288 */
2289static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2290{
2291 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2292
2293 ep->state = EP_STATE_TCP_FIN_RCVD;
2294 if (ep->conn)
2295 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2296}
2297
2298/**
2299 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2300 * @cm_sk: cnic sock structure pointer
2302 *
2303 * function callback exported via bnx2i - cnic driver interface to
2304 * indicate async TCP events (RST) sent by the peer.
2305 */
2306static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2307{
2308 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2309
2310 ep->state = EP_STATE_TCP_RST_RCVD;
2311 if (ep->conn)
2312 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2313}
2314
2315
2316static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
2317 char *buf, u16 buflen)
2318{
2319 struct bnx2i_hba *hba;
2320
2321 hba = bnx2i_find_hba_for_cnic(dev);
2322 if (!hba)
2323 return;
2324
2325 if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
2326 msg_type, buf, buflen))
2327 printk(KERN_ALERT "bnx2i: private nl message send error\n");
2328
2329}
2330
2331
2332/**
2333 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
2334 * carrying callback function pointers
2335 *
2336 */
2337struct cnic_ulp_ops bnx2i_cnic_cb = {
2338 .cnic_init = bnx2i_ulp_init,
2339 .cnic_exit = bnx2i_ulp_exit,
2340 .cnic_start = bnx2i_start,
2341 .cnic_stop = bnx2i_stop,
2342 .indicate_kcqes = bnx2i_indicate_kcqe,
2343 .indicate_netevent = bnx2i_indicate_netevent,
2344 .cm_connect_complete = bnx2i_cm_connect_cmpl,
2345 .cm_close_complete = bnx2i_cm_close_cmpl,
2346 .cm_abort_complete = bnx2i_cm_abort_cmpl,
2347 .cm_remote_close = bnx2i_cm_remote_close,
2348 .cm_remote_abort = bnx2i_cm_remote_abort,
2349 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2350 .owner = THIS_MODULE
2351};
2352
2353
2354/**
2355 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
2356 * @ep: bnx2i endpoint
2357 *
2358 * maps connection's SQ and RQ doorbell registers. 5706/5708/5709 devices
2359 * host these registers in BAR #0, whereas on 57710 they are accessed by
2360 * mapping BAR #1
2361 */
2362int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2363{
2364 u32 cid_num;
2365 u32 reg_off;
2366 u32 first_l4l5;
2367 u32 ctx_sz;
2368 u32 config2;
2369 resource_size_t reg_base;
2370
2371 cid_num = bnx2i_get_cid_num(ep);
2372
2373 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2374 reg_base = pci_resource_start(ep->hba->pcidev,
2375 BNX2X_DOORBELL_PCI_BAR);
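		/* each 57710 connection gets its own doorbell page within
		 * BAR #1 */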
2376 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
2377 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2378 goto arm_cq;
2379 }
2380
2381 reg_base = ep->hba->netdev->base_addr;
2382 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2383 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2384 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2385 first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2393 } else
2394 /* 5709 device in normal mode and 5706/5708 devices */
2395 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2396
2397 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
2398 MB_KERNEL_CTX_SIZE);
2399 if (!ep->qp.ctx_base)
2400 return -ENOMEM;
2401
2402arm_cq:
2403 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
2404 return 0;
2405}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 00000000000..ae4b2d588fd
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include "bnx2i.h"
15
16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count;
18static int bnx2i_reg_device;
19
20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.0.1d"
22#define DRV_MODULE_RELDATE "Mar 25, 2009"
23
24static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
26 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28
29MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
30MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
31MODULE_LICENSE("GPL");
32MODULE_VERSION(DRV_MODULE_VERSION);
33
34static DEFINE_RWLOCK(bnx2i_dev_lock);
35
36unsigned int event_coal_div = 1;
37module_param(event_coal_div, int, 0664);
38MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
39
40unsigned int en_tcp_dack = 1;
41module_param(en_tcp_dack, int, 0664);
42MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
43
44unsigned int error_mask1 = 0x00;
45module_param(error_mask1, int, 0664);
46MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
47
48unsigned int error_mask2 = 0x00;
49module_param(error_mask2, int, 0664);
50MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
51
52unsigned int sq_size;
53module_param(sq_size, int, 0664);
54MODULE_PARM_DESC(sq_size, "Configure SQ size");
55
56unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
57module_param(rq_size, int, 0664);
58MODULE_PARM_DESC(rq_size, "Configure RQ size");
59
60u64 iscsi_error_mask = 0x00;
61
62static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
63
64
65/**
66 * bnx2i_identify_device - identifies NetXtreme II device type
67 * @hba: Adapter structure pointer
68 *
69 * This function identifies the NX2 device type and sets appropriate
70 * queue mailbox register access method, 5709 requires driver to
71 * access MBOX regs using *bin* mode
72 */
73void bnx2i_identify_device(struct bnx2i_hba *hba)
74{
75 hba->cnic_dev_type = 0;
76 if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
77 (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
78 set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
79 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
80 (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
81 set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
82 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
83 (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
84 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
85 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
86 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
87 hba->pci_did == PCI_DEVICE_ID_NX2_57711)
88 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
89}
90
91
92/**
93 * get_adapter_list_head - returns head of adapter list
94 */
95struct bnx2i_hba *get_adapter_list_head(void)
96{
97 struct bnx2i_hba *hba = NULL;
98 struct bnx2i_hba *tmp_hba;
99
100 if (!adapter_count)
101 goto hba_not_found;
102
103 read_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba;
107 break;
108 }
109 }
110 read_unlock(&bnx2i_dev_lock);
111hba_not_found:
112 return hba;
113}
114
115
116/**
117 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
118 * @cnic: pointer to cnic device instance
119 *
120 */
121struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{
123 struct bnx2i_hba *hba, *temp;
124
125 read_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock);
129 return hba;
130 }
131 }
132 read_unlock(&bnx2i_dev_lock);
133 return NULL;
134}
135
136
137/**
138 * bnx2i_start - cnic callback to initialize & start adapter instance
139 * @handle: transparent handle pointing to adapter structure
140 *
141 * This function maps adapter structure to pcidev structure and initiates
142 * firmware handshake to enable/initialize on-chip iSCSI components.
143 * This bnx2i - cnic interface api callback is issued after the following
144 * two conditions are met -
145 * a) underlying network interface is up (marked by event 'NETDEV_UP'
146 * from netdev)
147 * b) bnx2i adapter instance is registered
148 */
149void bnx2i_start(void *handle)
150{
151#define BNX2I_INIT_POLL_TIME (1000 / HZ)
152 struct bnx2i_hba *hba = handle;
153 int i = HZ;
154
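	/* poll for ADAPTER_STATE_UP: HZ iterations of (1000 / HZ) ms is
	 * roughly a one second wait for firmware init to complete
	 */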
155 bnx2i_send_fw_iscsi_init_msg(hba);
156 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
157 msleep(BNX2I_INIT_POLL_TIME);
158}
159
160
161/**
162 * bnx2i_stop - cnic callback to shutdown adapter instance
163 * @handle: transparent handle pointing to adapter structure
164 *
165 * driver checks if adapter is already in shutdown mode, if not start
166 * the shutdown process
167 */
168void bnx2i_stop(void *handle)
169{
170 struct bnx2i_hba *hba = handle;
171
172 /* check if cleanup happened in GOING_DOWN context */
173 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
174 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
175 &hba->adapter_state))
176 iscsi_host_for_each_session(hba->shost,
177 bnx2i_drop_session);
178}
179
180/**
181 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
182 * @hba: Adapter instance to register
183 *
184 * registers bnx2i adapter instance with the cnic driver while holding the
185 * adapter structure lock
186 */
187void bnx2i_register_device(struct bnx2i_hba *hba)
188{
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return;
192 }
193
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195
196 spin_lock(&hba->lock);
197 bnx2i_reg_device++;
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201}
202
203
204/**
205 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
206 *
207 * registers all bnx2i adapter instances with the cnic driver while holding
208 * the global resource lock
209 */
210void bnx2i_reg_dev_all(void)
211{
212 struct bnx2i_hba *hba, *temp;
213
214 read_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock);
218}
219
220
221/**
222 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
223 * @hba: Adapter instance to unregister
224 *
225 * unregisters bnx2i adapter instance with the cnic driver while holding
226 * the adapter structure lock
227 */
228static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
229{
230 if (hba->ofld_conns_active ||
231 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
232 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
233 return;
234
235 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
236
237 spin_lock(&hba->lock);
238 bnx2i_reg_device--;
239 spin_unlock(&hba->lock);
240
241 /* ep_disconnect could come before NETDEV_DOWN, driver won't
242 * see NETDEV_DOWN as it already unregistered itself.
243 */
244 hba->adapter_state = 0;
245 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
246}
247
248/**
249 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
250 *
251 * unregisters all bnx2i adapter instances with the cnic driver while holding
252 * the global resource lock
253 */
254void bnx2i_unreg_dev_all(void)
255{
256 struct bnx2i_hba *hba, *temp;
257
258 read_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock);
262}
263
264
265/**
266 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
267 * @hba: bnx2i adapter instance
268 * @cnic: cnic device handle
269 *
270 * Global resource lock and host adapter lock is held during critical sections
271 * below. This routine is called from cnic_register_driver() context and
272 * from the work horse thread which does most device specific initialization
273 */
274static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
275{
276 int rc;
277
278 read_lock(&bnx2i_dev_lock);
279 if (bnx2i_reg_device &&
280 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
281 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
282 if (rc) /* duplicate registration */
283 printk(KERN_ERR "bnx2i- dev reg failed\n");
284
285 spin_lock(&hba->lock);
286 bnx2i_reg_device++;
287 hba->age++;
288 spin_unlock(&hba->lock);
289
290 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
291 }
292 read_unlock(&bnx2i_dev_lock);
293
294 write_lock(&bnx2i_dev_lock);
295 list_add_tail(&hba->link, &adapter_list);
296 adapter_count++;
297 write_unlock(&bnx2i_dev_lock);
298 return 0;
299}
300
301
302/**
303 * bnx2i_ulp_init - initialize an adapter instance
304 * @dev: cnic device handle
305 *
306 * Called from cnic_register_driver() context to initialize all enumerated
307 * cnic devices. This routine allocates the adapter structure and other
308 * device specific resources.
309 */
310void bnx2i_ulp_init(struct cnic_dev *dev)
311{
312 struct bnx2i_hba *hba;
313
314 /* Allocate a HBA structure for this device */
315 hba = bnx2i_alloc_hba(dev);
316 if (!hba) {
317 printk(KERN_ERR "bnx2i init: hba initialization failed\n");
318 return;
319 }
320
321 /* Get PCI related information and update hba struct members */
322 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
323 if (bnx2i_init_one(hba, dev)) {
324 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
325 bnx2i_free_hba(hba);
326 } else
327 hba->cnic = dev;
328}
329
330
331/**
332 * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
333 * @dev: cnic device handle
334 *
335 */
336void bnx2i_ulp_exit(struct cnic_dev *dev)
337{
338 struct bnx2i_hba *hba;
339
340 hba = bnx2i_find_hba_for_cnic(dev);
341 if (!hba) {
342 printk(KERN_INFO "bnx2i_ulp_exit: hba not "
343 "found, dev 0x%p\n", dev);
344 return;
345 }
346 write_lock(&bnx2i_dev_lock);
347 list_del_init(&hba->link);
348 adapter_count--;
349
350 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
351 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
352 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
353
354 spin_lock(&hba->lock);
355 bnx2i_reg_device--;
356 spin_unlock(&hba->lock);
357 }
358 write_unlock(&bnx2i_dev_lock);
359
360 bnx2i_free_hba(hba);
361}
362
363
364/**
365 * bnx2i_mod_init - module init entry point
366 *
367 * initializes any driver wide global data structures such as endpoint pool,
368 * tcp port manager/queue and sysfs. Finally the driver registers itself
369 * with the cnic module
370 */
371static int __init bnx2i_mod_init(void)
372{
373 int err;
374
375 printk(KERN_INFO "%s", version);
376
377 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size);
379
380 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) {
383 printk(KERN_ERR "Could not register bnx2i transport.\n");
384 err = -ENOMEM;
385 goto out;
386 }
387
388 err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
389 if (err) {
390 printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
391 goto unreg_xport;
392 }
393
394 return 0;
395
396unreg_xport:
397 iscsi_unregister_transport(&bnx2i_iscsi_transport);
398out:
399 return err;
400}
401
402
403/**
404 * bnx2i_mod_exit - module cleanup/exit entry point
405 *
406 * Global resource lock and host adapter lock are held during critical sections
407 * in this function. The driver browses through the adapter list, cleans up
408 * each instance, unregisters the iscsi transport name and finally
409 * unregisters itself with the cnic module
410 */
411static void __exit bnx2i_mod_exit(void)
412{
413 struct bnx2i_hba *hba;
414
415 write_lock(&bnx2i_dev_lock);
416 while (!list_empty(&adapter_list)) {
417 hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
418 list_del(&hba->link);
419 adapter_count--;
420
421 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
422 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
423 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
424 bnx2i_reg_device--;
425 }
426
427 write_unlock(&bnx2i_dev_lock);
428 bnx2i_free_hba(hba);
429 write_lock(&bnx2i_dev_lock);
430 }
431 write_unlock(&bnx2i_dev_lock);
432
433 iscsi_unregister_transport(&bnx2i_iscsi_transport);
434 cnic_unregister_driver(CNIC_ULP_ISCSI);
435}
436
437module_init(bnx2i_mod_init);
438module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 00000000000..f7412196f2f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */
14
15#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h>
17#include "bnx2i.h"
18
19struct scsi_transport_template *bnx2i_scsi_xport_template;
20struct iscsi_transport bnx2i_iscsi_transport;
21static struct scsi_host_template bnx2i_host_template;
22
23/*
24 * Global endpoint resource info
25 */
26static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
27
28
29static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
30{
31 int retval = 0;
32
33 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
34 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
35 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
36 retval = -EPERM;
37 return retval;
38}
39
40/**
41 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
42 * @cmd: iscsi cmd struct pointer
43 * @buf_off: absolute buffer offset
44 * @start_bd_off: u32 pointer to return the offset within the BD
45 * indicated by 'start_bd_idx' on which 'buf_off' falls
46 * @start_bd_idx: index of the BD on which 'buf_off' falls
47 *
48 * identifies & marks various bd info for scsi command's imm data,
49 * unsolicited data and the first solicited data seq.
50 */
51static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
52 u32 *start_bd_off, u32 *start_bd_idx)
53{
54 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
55 u32 cur_offset = 0;
56 u32 cur_bd_idx = 0;
57
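	/* walk the BD chain, accumulating buffer lengths, until the BD
	 * containing 'buf_off' is reached
	 */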
58 if (buf_off) {
59 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
60 cur_offset += bd_tbl->buffer_length;
61 cur_bd_idx++;
62 bd_tbl++;
63 }
64 }
65
66 *start_bd_off = buf_off - cur_offset;
67 *start_bd_idx = cur_bd_idx;
68}
69
70/**
71 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
72 * @task: transport layer's cmd struct pointer
73 *
74 * identifies & marks various bd info for scsi command's immediate data,
75 * unsolicited data and first solicited data seq which includes BD start
76 * index & BD buf off. This function takes into account iscsi parameters such
77 * as whether immediate data and unsolicited data are supported on this connection.
78 */
79static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
80{
81 struct bnx2i_cmd *cmd = task->dd_data;
82 u32 start_bd_offset;
83 u32 start_bd_idx;
84 u32 buffer_offset = 0;
85 u32 cmd_len = cmd->req.total_data_transfer_length;
86
87 /* if ImmediateData is turned off & InitialR2T is turned on,
88 * there will be no immediate or unsolicited data, just return.
89 */
90 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
91 return;
92
93 /* Immediate data */
94 buffer_offset += task->imm_count;
95 if (task->imm_count == cmd_len)
96 return;
97
98 if (iscsi_task_has_unsol_data(task)) {
99 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
100 &start_bd_offset, &start_bd_idx);
101 cmd->req.ud_buffer_offset = start_bd_offset;
102 cmd->req.ud_start_bd_index = start_bd_idx;
103 buffer_offset += task->unsol_r2t.data_length;
104 }
105
106 if (buffer_offset != cmd_len) {
107 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
108 &start_bd_offset, &start_bd_idx);
109 if ((start_bd_offset > task->conn->session->first_burst) ||
110 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
111 int i = 0;
112
113 iscsi_conn_printk(KERN_ALERT, task->conn,
114 "bnx2i- error, buf offset 0x%x "
115 "bd_valid %d use_sg %d\n",
116 buffer_offset, cmd->io_tbl.bd_valid,
117 scsi_sg_count(cmd->scsi_cmd));
118 for (i = 0; i < cmd->io_tbl.bd_valid; i++)
119 iscsi_conn_printk(KERN_ALERT, task->conn,
120 "bnx2i err, bd[%d]: len %x\n",
121 i, cmd->io_tbl.bd_tbl[i].\
122 buffer_length);
123 }
124 cmd->req.sd_buffer_offset = start_bd_offset;
125 cmd->req.sd_start_bd_index = start_bd_idx;
126 }
127}
128
129
130
131/**
132 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
133 * @hba: adapter instance
134 * @cmd: iscsi cmd struct pointer
135 *
136 * map SG list
137 */
138static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
139{
140 struct scsi_cmnd *sc = cmd->scsi_cmd;
141 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
142 struct scatterlist *sg;
143 int byte_count = 0;
144 int bd_count = 0;
145 int sg_count;
146 int sg_len;
147 u64 addr;
148 int i;
149
150 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
151
152 sg_count = scsi_dma_map(sc);
153
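	/* convert each DMA-mapped SG element into a BD entry; the first BD in
	 * the chain is flagged inside the loop and the last one after it
	 */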
154 scsi_for_each_sg(sc, sg, sg_count, i) {
155 sg_len = sg_dma_len(sg);
156 addr = (u64) sg_dma_address(sg);
157 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
158 bd[bd_count].buffer_addr_hi = addr >> 32;
159 bd[bd_count].buffer_length = sg_len;
160 bd[bd_count].flags = 0;
161 if (bd_count == 0)
162 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
163
164 byte_count += sg_len;
165 bd_count++;
166 }
167
168 if (bd_count)
169 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
170
171 BUG_ON(byte_count != scsi_bufflen(sc));
172 return bd_count;
173}
174
175/**
176 * bnx2i_iscsi_map_sg_list - maps SG list
177 * @cmd: iscsi cmd struct pointer
178 *
179 * creates BD list table for the command
180 */
181static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
182{
183 int bd_count;
184
185 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
186 if (!bd_count) {
187 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
188
189 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
190 bd[0].buffer_length = bd[0].flags = 0;
191 }
192 cmd->io_tbl.bd_valid = bd_count;
193}
194
195
196/**
197 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
198 * @cmd: iscsi cmd struct pointer
199 *
200 * unmap IO buffers and invalidate the BD table
201 */
202void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
203{
204 struct scsi_cmnd *sc = cmd->scsi_cmd;
205
206 if (cmd->io_tbl.bd_valid && sc) {
207 scsi_dma_unmap(sc);
208 cmd->io_tbl.bd_valid = 0;
209 }
210}
211
212static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
213{
214 memset(&cmd->req, 0x00, sizeof(cmd->req));
215 cmd->req.op_code = 0xFF;
216 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
217 cmd->req.bd_list_addr_hi =
218 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
219
220}
221
222
223/**
224 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
225 * @hba: pointer to adapter instance
226 * @conn: pointer to iscsi connection
227 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
228 *
229 * update iscsi cid table entry with connection pointer. This enables
230 * driver to quickly get hold of connection structure pointer in
231 * completion/interrupt thread using iscsi context ID
232 */
233static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
234 struct bnx2i_conn *bnx2i_conn,
235 u32 iscsi_cid)
236{
237 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
238 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
239 "conn bind - entry #%d not free\n", iscsi_cid);
240 return -EBUSY;
241 }
242
243 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
244 return 0;
245}
246
247
248/**
249 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
250 * @hba: pointer to adapter instance
251 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
252 */
253struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
254 u16 iscsi_cid)
255{
256 if (!hba->cid_que.conn_cid_tbl) {
257 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
258 return NULL;
259
260 } else if (iscsi_cid >= hba->max_active_conns) {
261 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
262 return NULL;
263 }
264 return hba->cid_que.conn_cid_tbl[iscsi_cid];
265}
266
267
268/**
269 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from free pool
270 * @hba: pointer to adapter instance
271 */
272static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
273{
274 int idx;
275
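	/* cid_que is a circular FIFO of free iscsi cids: entries are consumed
	 * at cid_q_cons_idx here and returned at cid_q_prod_idx by
	 * bnx2i_free_iscsi_cid()
	 */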
276 if (!hba->cid_que.cid_free_cnt)
277 return -1;
278
279 idx = hba->cid_que.cid_q_cons_idx;
280 hba->cid_que.cid_q_cons_idx++;
281 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
282 hba->cid_que.cid_q_cons_idx = 0;
283
284 hba->cid_que.cid_free_cnt--;
285 return hba->cid_que.cid_que[idx];
286}
287
288
289/**
290 * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool
291 * @hba: pointer to adapter instance
292 * @iscsi_cid: iscsi context ID to free
293 */
294static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
295{
296 int idx;
297
298 if (iscsi_cid == (u16) -1)
299 return;
300
301 hba->cid_que.cid_free_cnt++;
302
303 idx = hba->cid_que.cid_q_prod_idx;
304 hba->cid_que.cid_que[idx] = iscsi_cid;
305 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
306 hba->cid_que.cid_q_prod_idx++;
307 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
308 hba->cid_que.cid_q_prod_idx = 0;
309}
310
311
312/**
313 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
314 * @hba: pointer to adapter instance
315 *
316 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
317 * and initializes table attributes
318 */
319static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
320{
321 int mem_size;
322 int i;
323
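	/* round both table allocations up to a whole number of pages */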
324 mem_size = hba->max_active_conns * sizeof(u32);
325 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
326
327 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
328 if (!hba->cid_que.cid_que_base)
329 return -ENOMEM;
330
331 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
332 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
333 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
334 if (!hba->cid_que.conn_cid_tbl) {
335 kfree(hba->cid_que.cid_que_base);
336 hba->cid_que.cid_que_base = NULL;
337 return -ENOMEM;
338 }
339
340 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
341 hba->cid_que.cid_q_prod_idx = 0;
342 hba->cid_que.cid_q_cons_idx = 0;
343 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
344 hba->cid_que.cid_free_cnt = hba->max_active_conns;
345
346 for (i = 0; i < hba->max_active_conns; i++) {
347 hba->cid_que.cid_que[i] = i;
348 hba->cid_que.conn_cid_tbl[i] = NULL;
349 }
350 return 0;
351}
352
353
354/**
355 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
356 * @hba: pointer to adapter instance
357 */
358static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
359{
360 kfree(hba->cid_que.cid_que_base);
361 hba->cid_que.cid_que_base = NULL;
362
363 kfree(hba->cid_que.conn_cid_tbl);
364 hba->cid_que.conn_cid_tbl = NULL;
365}
366
367
368/**
369 * bnx2i_alloc_ep - allocates ep structure from global pool
370 * @hba: pointer to adapter instance
371 *
372 * routine allocates a free endpoint structure from the global pool and
373 * initializes it for a new connection on the given adapter instance,
374 * bumping the adapter's count of active offloaded connections
375 */
376static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
377{
378 struct iscsi_endpoint *ep;
379 struct bnx2i_endpoint *bnx2i_ep;
380
381 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
382 if (!ep) {
383 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
384 return NULL;
385 }
386
387 bnx2i_ep = ep->dd_data;
388 INIT_LIST_HEAD(&bnx2i_ep->link);
389 bnx2i_ep->state = EP_STATE_IDLE;
390 bnx2i_ep->hba = hba;
391 bnx2i_ep->hba_age = hba->age;
392 hba->ofld_conns_active++;
393 init_waitqueue_head(&bnx2i_ep->ofld_wait);
394 return ep;
395}
396
397
398/**
399 * bnx2i_free_ep - free endpoint
400 * @ep: pointer to iscsi endpoint structure
401 */
402static void bnx2i_free_ep(struct iscsi_endpoint *ep)
403{
404 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
405 unsigned long flags;
406
407 spin_lock_irqsave(&bnx2i_resc_lock, flags);
408 bnx2i_ep->state = EP_STATE_IDLE;
409 bnx2i_ep->hba->ofld_conns_active--;
410
411 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
412 if (bnx2i_ep->conn) {
413 bnx2i_ep->conn->ep = NULL;
414 bnx2i_ep->conn = NULL;
415 }
416
417 bnx2i_ep->hba = NULL;
418 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
419 iscsi_destroy_endpoint(ep);
420}
421
422
423/**
424 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
425 * @hba: adapter instance pointer
426 * @session: iscsi session pointer
427 * @cmd: iscsi command structure
428 */
429static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
430 struct bnx2i_cmd *cmd)
431{
432 struct io_bdt *io = &cmd->io_tbl;
433 struct iscsi_bd *bd;
434
435 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
436 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
437 &io->bd_tbl_dma, GFP_KERNEL);
438 if (!io->bd_tbl) {
439 iscsi_session_printk(KERN_ERR, session, "Could not "
440 "allocate bdt.\n");
441 return -ENOMEM;
442 }
443 io->bd_valid = 0;
444 return 0;
445}
446
447/**
448 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
449 * @hba: adapter instance pointer
450 * @session: iscsi session pointer
451 * @cmd: iscsi command structure
452 */
453static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
454 struct iscsi_session *session)
455{
456 int i;
457
458 for (i = 0; i < session->cmds_max; i++) {
459 struct iscsi_task *task = session->cmds[i];
460 struct bnx2i_cmd *cmd = task->dd_data;
461
462 if (cmd->io_tbl.bd_tbl)
463 dma_free_coherent(&hba->pcidev->dev,
464 ISCSI_MAX_BDS_PER_CMD *
465 sizeof(struct iscsi_bd),
466 cmd->io_tbl.bd_tbl,
467 cmd->io_tbl.bd_tbl_dma);
468 }
469
470}
471
472
473/**
474 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
475 * @hba: adapter instance pointer
476 * @session: iscsi session pointer
477 */
478static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
479 struct iscsi_session *session)
480{
481 int i;
482
483 for (i = 0; i < session->cmds_max; i++) {
484 struct iscsi_task *task = session->cmds[i];
485 struct bnx2i_cmd *cmd = task->dd_data;
486
487 /* Anil */
488 task->hdr = &cmd->hdr;
489 task->hdr_max = sizeof(struct iscsi_hdr);
490
491 if (bnx2i_alloc_bdt(hba, session, cmd))
492 goto free_bdts;
493 }
494
495 return 0;
496
497free_bdts:
498 bnx2i_destroy_cmd_pool(hba, session);
499 return -ENOMEM;
500}
501
502
503/**
504 * bnx2i_setup_mp_bdt - allocate BD table resources
505 * @hba: pointer to adapter structure
506 *
507 * Allocate memory for dummy buffer and associated BD
508 * table to be used by middle path (MP) requests
509 */
510static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
511{
512 int rc = 0;
513 struct iscsi_bd *mp_bdt;
514 u64 addr;
515
516 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
517 &hba->mp_bd_dma, GFP_KERNEL);
518 if (!hba->mp_bd_tbl) {
519 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
520 rc = -1;
521 goto out;
522 }
523
524 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
525 &hba->dummy_buf_dma, GFP_KERNEL);
526 if (!hba->dummy_buffer) {
527 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
528 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
529 hba->mp_bd_tbl, hba->mp_bd_dma);
530 hba->mp_bd_tbl = NULL;
531 rc = -1;
532 goto out;
533 }
534
535 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
536 addr = (unsigned long) hba->dummy_buf_dma;
537 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
538 mp_bdt->buffer_addr_hi = addr >> 32;
539 mp_bdt->buffer_length = PAGE_SIZE;
540 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
541 ISCSI_BD_FIRST_IN_BD_CHAIN;
542out:
543 return rc;
544}
545
546
547/**
548 * bnx2i_free_mp_bdt - releases middle path (MP) BD table resources
549 * @hba: pointer to adapter instance
550 *
551 * free MP dummy buffer and associated BD table
552 */
553static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
554{
555 if (hba->mp_bd_tbl) {
556 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
557 hba->mp_bd_tbl, hba->mp_bd_dma);
558 hba->mp_bd_tbl = NULL;
559 }
560 if (hba->dummy_buffer) {
561 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
562 hba->dummy_buffer, hba->dummy_buf_dma);
563 hba->dummy_buffer = NULL;
564 }
565 return;
566}
567
568/**
569 * bnx2i_drop_session - notifies iscsid of connection error.
570 * @cls_session: pointer to iscsi cls session
572 *
573 * This notifies iscsid that there is an error, so it can initiate
574 * recovery.
575 *
576 * This relies on caller using the iscsi class iterator so the object
577 * is refcounted and does not disappear from under us.
578 */
579void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
580{
581 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
582}
583
584/**
585 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
586 * @hba: pointer to adapter instance
587 * @ep: pointer to endpoint (transport identifier) structure
588 *
589 * EP destroy queue manager
590 */
591static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
592 struct bnx2i_endpoint *ep)
593{
594 write_lock_bh(&hba->ep_rdwr_lock);
595 list_add_tail(&ep->link, &hba->ep_destroy_list);
596 write_unlock_bh(&hba->ep_rdwr_lock);
597 return 0;
598}
599
600/**
601 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
602 *
603 * @hba: pointer to adapter instance
604 * @ep: pointer to endpoint (transport identifier) structure
605 *
606 * EP destroy queue manager
607 */
608static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
609 struct bnx2i_endpoint *ep)
610{
611 write_lock_bh(&hba->ep_rdwr_lock);
612 list_del_init(&ep->link);
613 write_unlock_bh(&hba->ep_rdwr_lock);
614
615 return 0;
616}
617
618/**
619 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
620 * @hba: pointer to adapter instance
621 * @ep: pointer to endpoint (transport identifier) structure
622 *
623 * pending conn offload completion queue manager
624 */
625static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
626 struct bnx2i_endpoint *ep)
627{
628 write_lock_bh(&hba->ep_rdwr_lock);
629 list_add_tail(&ep->link, &hba->ep_ofld_list);
630 write_unlock_bh(&hba->ep_rdwr_lock);
631 return 0;
632}
633
634/**
635 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
636 * @hba: pointer to adapter instance
637 * @ep: pointer to endpoint (transport identifier) structure
638 *
639 * pending conn offload completion queue manager
640 */
641static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
642 struct bnx2i_endpoint *ep)
643{
644 write_lock_bh(&hba->ep_rdwr_lock);
645 list_del_init(&ep->link);
646 write_unlock_bh(&hba->ep_rdwr_lock);
647 return 0;
648}
649
650
651/**
652 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
653 *
654 * @hba: pointer to adapter instance
655 * @iscsi_cid: iscsi context ID to find
656 *
657 */
658struct bnx2i_endpoint *
659bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
660{
661 struct list_head *list;
662 struct list_head *tmp;
663 struct bnx2i_endpoint *ep;
664
665 read_lock_bh(&hba->ep_rdwr_lock);
666 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
667 ep = (struct bnx2i_endpoint *)list;
668
669 if (ep->ep_iscsi_cid == iscsi_cid)
670 break;
671 ep = NULL;
672 }
673 read_unlock_bh(&hba->ep_rdwr_lock);
674
675 if (!ep)
676 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
677 return ep;
678}
679
680
681/**
682 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
683 * @hba: pointer to adapter instance
684 * @iscsi_cid: iscsi context ID to find
685 *
686 */
687struct bnx2i_endpoint *
688bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
689{
690 struct list_head *list;
691 struct list_head *tmp;
692 struct bnx2i_endpoint *ep;
693
694 read_lock_bh(&hba->ep_rdwr_lock);
695 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
696 ep = (struct bnx2i_endpoint *)list;
697
698 if (ep->ep_iscsi_cid == iscsi_cid)
699 break;
700 ep = NULL;
701 }
702 read_unlock_bh(&hba->ep_rdwr_lock);
703
704 if (!ep)
705 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
706
707 return ep;
708}
709
710/**
711 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
712 * @hba: pointer to adapter instance
713 * @shost: scsi host pointer
714 *
715 * Initializes 'can_queue' parameter based on how many outstanding commands
716 * the device can handle. Each device 5708/5709/57710 has different
717 * capabilities
718 */
719static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
720 struct Scsi_Host *shost)
721{
722 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
723 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
724 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
725 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
726 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
727 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
728 else
729 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
730}
731
732
733/**
734 * bnx2i_alloc_hba - allocate and init adapter instance
735 * @cnic: cnic device pointer
736 *
737 * allocate & initialize adapter structure and call other
738 * support routines to do per adapter initialization
739 */
740struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
741{
742 struct Scsi_Host *shost;
743 struct bnx2i_hba *hba;
744
745 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
746 if (!shost)
747 return NULL;
748 shost->dma_boundary = cnic->pcidev->dma_mask;
749 shost->transportt = bnx2i_scsi_xport_template;
750 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
751 shost->max_channel = 0;
752 shost->max_lun = 512;
753 shost->max_cmd_len = 16;
754
755 hba = iscsi_host_priv(shost);
756 hba->shost = shost;
757 hba->netdev = cnic->netdev;
758 /* Get PCI related information and update hba struct members */
759 hba->pcidev = cnic->pcidev;
760 pci_dev_get(hba->pcidev);
761 hba->pci_did = hba->pcidev->device;
762 hba->pci_vid = hba->pcidev->vendor;
763 hba->pci_sdid = hba->pcidev->subsystem_device;
764 hba->pci_svid = hba->pcidev->subsystem_vendor;
765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
767 bnx2i_identify_device(hba);
768
770 bnx2i_setup_host_queue_size(hba, shost);
771
772 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
773 hba->regview = ioremap_nocache(hba->netdev->base_addr,
774 BNX2_MQ_CONFIG2);
775 if (!hba->regview)
776 goto ioreg_map_err;
777 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
778 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
779 if (!hba->regview)
780 goto ioreg_map_err;
781 }
782
783 if (bnx2i_setup_mp_bdt(hba))
784 goto mp_bdt_mem_err;
785
786 INIT_LIST_HEAD(&hba->ep_ofld_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock);
789
790 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
791
792 /* different values for 5708/5709/57710 */
793 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
794
795 if (bnx2i_setup_free_cid_que(hba))
796 goto cid_que_err;
797
798 /* SQ/RQ/CQ size can be changed via sysfs interface */
799 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
800 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
801 hba->max_sqes = sq_size;
802 else
803 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
804 } else { /* 5706/5708/5709 */
805 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
806 hba->max_sqes = sq_size;
807 else
808 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
809 }
810
811 hba->max_rqes = rq_size;
812 hba->max_cqes = hba->max_sqes + rq_size;
813 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
814 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
815 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
816 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
817 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
818
819 hba->num_ccell = hba->max_sqes / 2;
820
821 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock);
823
824 if (iscsi_host_add(shost, &hba->pcidev->dev))
825 goto free_dump_mem;
826 return hba;
827
828free_dump_mem:
829 bnx2i_release_free_cid_que(hba);
830cid_que_err:
831 bnx2i_free_mp_bdt(hba);
832mp_bdt_mem_err:
833 if (hba->regview) {
834 iounmap(hba->regview);
835 hba->regview = NULL;
836 }
837ioreg_map_err:
838 pci_dev_put(hba->pcidev);
839 scsi_host_put(shost);
840 return NULL;
841}
842
843/**
844 * bnx2i_free_hba - releases hba structure and resources held by the adapter
845 * @hba: pointer to adapter instance
846 *
847 * free adapter structure and call various cleanup routines.
848 */
849void bnx2i_free_hba(struct bnx2i_hba *hba)
850{
851 struct Scsi_Host *shost = hba->shost;
852
853 iscsi_host_remove(shost);
854 INIT_LIST_HEAD(&hba->ep_ofld_list);
855 INIT_LIST_HEAD(&hba->ep_destroy_list);
856 pci_dev_put(hba->pcidev);
857
858 if (hba->regview) {
859 iounmap(hba->regview);
860 hba->regview = NULL;
861 }
862 bnx2i_free_mp_bdt(hba);
863 bnx2i_release_free_cid_que(hba);
864 iscsi_host_free(shost);
865}
866
867/**
868 * bnx2i_conn_free_login_resources - free DMA resources used for login process
869 * @hba: pointer to adapter instance
870 * @bnx2i_conn: iscsi connection pointer
871 *
872 * Login related resources, mostly BDT & payload DMA memory, are freed
873 */
874static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
875 struct bnx2i_conn *bnx2i_conn)
876{
877 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
878 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
879 bnx2i_conn->gen_pdu.resp_bd_tbl,
880 bnx2i_conn->gen_pdu.resp_bd_dma);
881 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
882 }
883
884 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
885 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
886 bnx2i_conn->gen_pdu.req_bd_tbl,
887 bnx2i_conn->gen_pdu.req_bd_dma);
888 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
889 }
890
891 if (bnx2i_conn->gen_pdu.resp_buf) {
892 dma_free_coherent(&hba->pcidev->dev,
893 ISCSI_DEF_MAX_RECV_SEG_LEN,
894 bnx2i_conn->gen_pdu.resp_buf,
895 bnx2i_conn->gen_pdu.resp_dma_addr);
896 bnx2i_conn->gen_pdu.resp_buf = NULL;
897 }
898
899 if (bnx2i_conn->gen_pdu.req_buf) {
900 dma_free_coherent(&hba->pcidev->dev,
901 ISCSI_DEF_MAX_RECV_SEG_LEN,
902 bnx2i_conn->gen_pdu.req_buf,
903 bnx2i_conn->gen_pdu.req_dma_addr);
904 bnx2i_conn->gen_pdu.req_buf = NULL;
905 }
906}
907
908/**
909 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
910 * @hba: pointer to adapter instance
911 * @bnx2i_conn: iscsi connection pointer
912 *
913 * Mgmt task DMA resources are allocated in this routine.
914 */
915static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
916 struct bnx2i_conn *bnx2i_conn)
917{
918 /* Allocate memory for login request/response buffers */
919 bnx2i_conn->gen_pdu.req_buf =
920 dma_alloc_coherent(&hba->pcidev->dev,
921 ISCSI_DEF_MAX_RECV_SEG_LEN,
922 &bnx2i_conn->gen_pdu.req_dma_addr,
923 GFP_KERNEL);
924 if (bnx2i_conn->gen_pdu.req_buf == NULL)
925 goto login_req_buf_failure;
926
927 bnx2i_conn->gen_pdu.req_buf_size = 0;
928 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
929
930 bnx2i_conn->gen_pdu.resp_buf =
931 dma_alloc_coherent(&hba->pcidev->dev,
932 ISCSI_DEF_MAX_RECV_SEG_LEN,
933 &bnx2i_conn->gen_pdu.resp_dma_addr,
934 GFP_KERNEL);
935 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
936 goto login_resp_buf_failure;
937
938 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
939 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
940
941 bnx2i_conn->gen_pdu.req_bd_tbl =
942 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
943 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
944 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
945 goto login_req_bd_tbl_failure;
946
947 bnx2i_conn->gen_pdu.resp_bd_tbl =
948 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
949 &bnx2i_conn->gen_pdu.resp_bd_dma,
950 GFP_KERNEL);
951 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
952 goto login_resp_bd_tbl_failure;
953
954 return 0;
955
956login_resp_bd_tbl_failure:
957 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
958 bnx2i_conn->gen_pdu.req_bd_tbl,
959 bnx2i_conn->gen_pdu.req_bd_dma);
960 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
961
962login_req_bd_tbl_failure:
963 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.resp_buf,
965 bnx2i_conn->gen_pdu.resp_dma_addr);
966 bnx2i_conn->gen_pdu.resp_buf = NULL;
967login_resp_buf_failure:
968 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
969 bnx2i_conn->gen_pdu.req_buf,
970 bnx2i_conn->gen_pdu.req_dma_addr);
971 bnx2i_conn->gen_pdu.req_buf = NULL;
972login_req_buf_failure:
973 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
974 "login resource alloc failed!!\n");
975 return -ENOMEM;
976
977}
978
979
980/**
981 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
982 * @bnx2i_conn: iscsi connection pointer
983 *
984 * Prepares the BD tables of the preallocated login buffers before shipping
985 * requests to cnic for PDUs prepared by the 'iscsid' daemon
986 */
987static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
988{
989 struct iscsi_bd *bd_tbl;
990
991 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
992
993 bd_tbl->buffer_addr_hi =
994 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
995 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
996 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
997 bnx2i_conn->gen_pdu.req_buf;
998 bd_tbl->reserved0 = 0;
999 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1000 ISCSI_BD_FIRST_IN_BD_CHAIN;
1001
1002 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1003 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1004 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1005 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1006 bd_tbl->reserved0 = 0;
1007 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1008 ISCSI_BD_FIRST_IN_BD_CHAIN;
1009}
1010
1011
1012/**
1013 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1014 * @task: transport layer task pointer
1015 *
1016 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1017 * Nop-out and Logout requests flow through this path.
1018 */
1019static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1020{
1021 struct bnx2i_cmd *cmd = task->dd_data;
1022 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1023 int rc = 0;
1024 char *buf;
1025 int data_len;
1026
1027 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1028 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1029 case ISCSI_OP_LOGIN:
1030 bnx2i_send_iscsi_login(bnx2i_conn, task);
1031 break;
1032 case ISCSI_OP_NOOP_OUT:
1033 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1034 buf = bnx2i_conn->gen_pdu.req_buf;
1035 if (data_len)
1036 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1037 RESERVED_ITT,
1038 buf, data_len, 1);
1039 else
1040 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1041 RESERVED_ITT,
1042 NULL, 0, 1);
1043 break;
1044 case ISCSI_OP_LOGOUT:
1045 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1046 break;
1047 case ISCSI_OP_SCSI_TMFUNC:
1048 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1049 break;
1050 default:
1051 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1052 "send_gen: unsupported op 0x%x\n",
1053 task->hdr->opcode);
1054 }
1055 return rc;
1056}
1057
1058
1059/**********************************************************************
1060 * SCSI-ML Interface
1061 **********************************************************************/
1062
1063/**
1064 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1065 * @sc: SCSI-ML command pointer
1066 * @cmd: iscsi cmd pointer
1067 */
1068static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1069{
1070 u32 dword;
1071 int lpcnt;
1072 u8 *srcp;
1073 u32 *dstp;
1074 u32 scsi_lun[2];
1075
1076 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1077 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1078 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1079
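	/* the CDB is copied into the request as big-endian 32-bit words; a
	 * trailing partial word, if any, is packed below
	 */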
1080 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1081 srcp = (u8 *) sc->cmnd;
1082 dstp = (u32 *) cmd->req.cdb;
1083 while (lpcnt--) {
1084 memcpy(&dword, (const void *) srcp, 4);
1085 *dstp = cpu_to_be32(dword);
1086 srcp += 4;
1087 dstp++;
1088 }
1089 if (sc->cmd_len & 0x3) {
1090 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1091 *dstp = cpu_to_be32(dword);
1092 }
1093}
1094
1095static void bnx2i_cleanup_task(struct iscsi_task *task)
1096{
1097 struct iscsi_conn *conn = task->conn;
1098 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1099 struct bnx2i_hba *hba = bnx2i_conn->hba;
1100
1101 /*
1102 * mgmt task or cmd was never sent to us to transmit.
1103 */
1104 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1105 return;
1106 /*
1107 * need to clean-up task context to claim dma buffers
1108 */
1109 if (task->state == ISCSI_TASK_ABRT_TMF) {
1110 bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1111
1112 spin_unlock_bh(&conn->session->lock);
1113 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1114 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1115 spin_lock_bh(&conn->session->lock);
1116 }
1117 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1118}
1119
1120/**
1121 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1122 * @conn: transport layer conn structure pointer
1123 * @task: transport layer command structure pointer
1124 */
1125static int
1126bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1127{
1128 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1129 struct bnx2i_cmd *cmd = task->dd_data;
1130
1131 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1132
1133 bnx2i_setup_cmd_wqe_template(cmd);
1134 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1135 if (task->data_count) {
1136 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1137 task->data_count);
1138 bnx2i_conn->gen_pdu.req_wr_ptr =
1139 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1140 }
1141 cmd->conn = conn->dd_data;
1142 cmd->scsi_cmd = NULL;
1143 return bnx2i_iscsi_send_generic_request(task);
1144}
1145
1146/**
1147 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1148 * @task: transport layer command structure pointer
1149 *
1150 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
1151 */
1152static int bnx2i_task_xmit(struct iscsi_task *task)
1153{
1154 struct iscsi_conn *conn = task->conn;
1155 struct iscsi_session *session = conn->session;
1156 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1157 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1158 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1159 struct scsi_cmnd *sc = task->sc;
1160 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN;
1168
1169 /*
1170 * If there is no scsi_cmnd this must be a mgmt task
1171 */
1172 if (!sc)
1173 return bnx2i_mtask_xmit(conn, task);
1174
1175 bnx2i_setup_cmd_wqe_template(cmd);
1176 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1177 cmd->conn = bnx2i_conn;
1178 cmd->scsi_cmd = sc;
1179 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1180 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1181
1182 bnx2i_iscsi_map_sg_list(cmd);
1183 bnx2i_cpy_scsi_cdb(sc, cmd);
1184
1185 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1186 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1187 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1188 cmd->req.itt = task->itt |
1189 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1190 bnx2i_setup_write_cmd_bd_info(task);
1191 } else {
1192 if (scsi_bufflen(sc))
1193 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1194 cmd->req.itt = task->itt |
1195 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1196 }
1197
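	/* commands with no data buffers (bd_valid == 0) reuse the adapter's
	 * shared middle-path dummy BD so the WQE always carries one valid BD
	 */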
1198 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1199 if (!cmd->io_tbl.bd_valid) {
1200 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1201 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1202 cmd->req.num_bds = 1;
1203 }
1204
1205 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1206 return 0;
1207}
1208
1209/**
1210 * bnx2i_session_create - create a new iscsi session
1211 * @cmds_max: max commands supported
1212 * @qdepth: scsi queue depth to support
1213 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1214 *
1215 * Creates a new iSCSI session instance on given device.
1216 */
1217static struct iscsi_cls_session *
1218bnx2i_session_create(struct iscsi_endpoint *ep,
1219 uint16_t cmds_max, uint16_t qdepth,
1220 uint32_t initial_cmdsn)
1221{
1222 struct Scsi_Host *shost;
1223 struct iscsi_cls_session *cls_session;
1224 struct bnx2i_hba *hba;
1225 struct bnx2i_endpoint *bnx2i_ep;
1226
1227 if (!ep) {
1228 printk(KERN_ERR "bnx2i: missing ep.\n");
1229 return NULL;
1230 }
1231
1232 bnx2i_ep = ep->dd_data;
1233 shost = bnx2i_ep->hba->shost;
1234 hba = iscsi_host_priv(shost);
1235 if (bnx2i_adapter_ready(hba))
1236 return NULL;
1237
1238 /*
1239 * user can override hw limit as long as it is within
1240 * the min/max.
1241 */
1242 if (cmds_max > hba->max_sqes)
1243 cmds_max = hba->max_sqes;
1244 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1245 cmds_max = BNX2I_SQ_WQES_MIN;
1246
1247 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1248 cmds_max, sizeof(struct bnx2i_cmd),
1249 initial_cmdsn, ISCSI_MAX_TARGET);
1250 if (!cls_session)
1251 return NULL;
1252
1253 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1254 goto session_teardown;
1255 return cls_session;
1256
1257session_teardown:
1258 iscsi_session_teardown(cls_session);
1259 return NULL;
1260}
1261
1262
1263/**
1264 * bnx2i_session_destroy - destroys iscsi session
1265 * @cls_session: pointer to iscsi cls session
1266 *
1267 * Destroys previously created iSCSI session instance and releases
1268 * all resources held by it
1269 */
1270static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1271{
1272 struct iscsi_session *session = cls_session->dd_data;
1273 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1274 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1275
1276 bnx2i_destroy_cmd_pool(hba, session);
1277 iscsi_session_teardown(cls_session);
1278}
1279
1280
1281/**
1282 * bnx2i_conn_create - create iscsi connection instance
1283 * @cls_session: pointer to iscsi cls session
1284 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1285 *
1286 * Creates a new iSCSI connection instance for a given session
1287 */
1288static struct iscsi_cls_conn *
1289bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1290{
1291 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1292 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1293 struct bnx2i_conn *bnx2i_conn;
1294 struct iscsi_cls_conn *cls_conn;
1295 struct iscsi_conn *conn;
1296
1297 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1298 cid);
1299 if (!cls_conn)
1300 return NULL;
1301 conn = cls_conn->dd_data;
1302
1303 bnx2i_conn = conn->dd_data;
1304 bnx2i_conn->cls_conn = cls_conn;
1305 bnx2i_conn->hba = hba;
1306 /* 'ep' ptr will be assigned in bind() call */
1307 bnx2i_conn->ep = NULL;
1308 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1309
1310 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1311 iscsi_conn_printk(KERN_ALERT, conn,
1312 "conn_new: login resc alloc failed!!\n");
1313 goto free_conn;
1314 }
1315
1316 return cls_conn;
1317
1318free_conn:
1319 iscsi_conn_teardown(cls_conn);
1320 return NULL;
1321}
1322
1323/**
1324 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1325 * @cls_session: pointer to iscsi cls session
1326 * @cls_conn: pointer to iscsi cls conn
1327 * @transport_fd: 64-bit EP handle
1328 * @is_leading: leading connection on this session?
1329 *
1330 * Binds together iSCSI session instance, iSCSI connection instance
1331 * and the TCP connection. This routine returns an error code if the
1332 * TCP connection does not belong to the device the iSCSI sess/conn
1333 * is bound to
1334 */
1335static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1336 struct iscsi_cls_conn *cls_conn,
1337 uint64_t transport_fd, int is_leading)
1338{
1339 struct iscsi_conn *conn = cls_conn->dd_data;
1340 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1341 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 struct bnx2i_endpoint *bnx2i_ep;
1344 struct iscsi_endpoint *ep;
1345 int ret_code;
1346
1347 ep = iscsi_lookup_endpoint(transport_fd);
1348 if (!ep)
1349 return -EINVAL;
1350
1351 bnx2i_ep = ep->dd_data;
1352 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1353 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1354 /* Peer disconnect via FIN or RST */
1355 return -EINVAL;
1356
1357 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1358 return -EINVAL;
1359
1360 if (bnx2i_ep->hba != hba) {
1361 /* Error - TCP connection does not belong to this device
1362 */
1363 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1364 "conn bind, ep=0x%p (%s) does not",
1365 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1366 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1367 "belong to hba (%s)\n",
1368 hba->netdev->name);
1369 return -EEXIST;
1370 }
1371
1372 bnx2i_ep->conn = bnx2i_conn;
1373 bnx2i_conn->ep = bnx2i_ep;
1374 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1375 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1376 bnx2i_conn->is_bound = 1;
1377
1378 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1379 bnx2i_ep->ep_iscsi_cid);
1380
1381 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1382 * driver needs to explicitly replenish RQ index during setup.
1383 */
1384 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1385 bnx2i_put_rq_buf(bnx2i_conn, 0);
1386
1387 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1388 return ret_code;
1389}
1390
1391
1392/**
1393 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1394 * @cls_conn: pointer to iscsi cls conn
1395 *
1396 * Destroy an iSCSI connection instance and release memory resources held by
1397 * this connection
1398 */
1399static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1400{
1401 struct iscsi_conn *conn = cls_conn->dd_data;
1402 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1403 struct Scsi_Host *shost;
1404 struct bnx2i_hba *hba;
1405
1406 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1407 hba = iscsi_host_priv(shost);
1408
1409 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1410 iscsi_conn_teardown(cls_conn);
1411}
1412
1413
1414/**
1415 * bnx2i_conn_get_param - return iscsi connection parameter to caller
1416 * @cls_conn: pointer to iscsi cls conn
1417 * @param: parameter type identifier
1418 * @buf: buffer pointer
1419 *
1420 * returns iSCSI connection parameters
1421 */
1422static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1423 enum iscsi_param param, char *buf)
1424{
1425 struct iscsi_conn *conn = cls_conn->dd_data;
1426 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1427 int len = 0;
1428
1429 switch (param) {
1430 case ISCSI_PARAM_CONN_PORT:
1431 if (bnx2i_conn->ep)
1432 len = sprintf(buf, "%hu\n",
1433 bnx2i_conn->ep->cm_sk->dst_port);
1434 break;
1435 case ISCSI_PARAM_CONN_ADDRESS:
1436 if (bnx2i_conn->ep)
1437 len = sprintf(buf, NIPQUAD_FMT "\n",
1438 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
1439 break;
1440 default:
1441 return iscsi_conn_get_param(cls_conn, param, buf);
1442 }
1443
1444 return len;
1445}
1446
1447/**
1448 * bnx2i_host_get_param - returns host (adapter) related parameters
1449 * @shost: scsi host pointer
1450 * @param: parameter type identifier
1451 * @buf: buffer pointer
1452 */
1453static int bnx2i_host_get_param(struct Scsi_Host *shost,
1454 enum iscsi_host_param param, char *buf)
1455{
1456 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1457 int len = 0;
1458
1459 switch (param) {
1460 case ISCSI_HOST_PARAM_HWADDRESS:
1461 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1462 break;
1463 case ISCSI_HOST_PARAM_NETDEV_NAME:
1464 len = sprintf(buf, "%s\n", hba->netdev->name);
1465 break;
1466 default:
1467 return iscsi_host_get_param(shost, param, buf);
1468 }
1469 return len;
1470}
1471
1472/**
1473 * bnx2i_conn_start - completes iscsi connection migration to FFP
1474 * @cls_conn: pointer to iscsi cls conn
1475 *
1476 * last call in FFP migration to hand over the iscsi conn to the driver
1477 */
1478static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1479{
1480 struct iscsi_conn *conn = cls_conn->dd_data;
1481 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1482
1483 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1484 bnx2i_update_iscsi_conn(conn);
1485
1486 /*
1487 * this should normally not sleep for a long time so it should
1488 * not disrupt the caller.
1489 */
1490 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1491 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1492 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1493 add_timer(&bnx2i_conn->ep->ofld_timer);
1494 /* update iSCSI context for this conn, wait for CNIC to complete */
1495 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1496 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1497
1498 if (signal_pending(current))
1499 flush_signals(current);
1500 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1501
1502 iscsi_conn_start(cls_conn);
1503 return 0;
1504}
1505
1506
1507/**
1508 * bnx2i_conn_get_stats - returns iSCSI stats
1509 * @cls_conn: pointer to iscsi cls conn
1510 * @stats: pointer to iscsi statistic struct
1511 */
1512static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1513 struct iscsi_stats *stats)
1514{
1515 struct iscsi_conn *conn = cls_conn->dd_data;
1516
1517 stats->txdata_octets = conn->txdata_octets;
1518 stats->rxdata_octets = conn->rxdata_octets;
1519 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1520 stats->dataout_pdus = conn->dataout_pdus_cnt;
1521 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1522 stats->datain_pdus = conn->datain_pdus_cnt;
1523 stats->r2t_pdus = conn->r2t_pdus_cnt;
1524 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1525 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1526	stats->digest_err = 0;
1527	stats->timeout_err = 0;
1528	/* report one custom counter: SCSI eh_abort invocations on this conn */
1529	strcpy(stats->custom[0].desc, "eh_abort_cnt");
1530	stats->custom[0].value = conn->eh_abort_cnt;
1531	stats->custom_length = 1;
1532}
1533
1534
1535/**
1536 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1537 * @dst_addr: target IP address
1538 *
1539 * check if route resolves to BNX2 device
1540 */
1541static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1542{
1543 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1544 struct bnx2i_hba *hba;
1545 struct cnic_dev *cnic = NULL;
1546
1547 bnx2i_reg_dev_all();
1548
1549 hba = get_adapter_list_head();
1550 if (hba && hba->cnic)
1551 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1552 if (!cnic) {
1553		printk(KERN_ALERT "bnx2i: no route, "
1554		       "can't connect using cnic\n");
1555 goto no_nx2_route;
1556 }
1557 hba = bnx2i_find_hba_for_cnic(cnic);
1558 if (!hba)
1559 goto no_nx2_route;
1560
1561 if (bnx2i_adapter_ready(hba)) {
1562 printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1563 goto no_nx2_route;
1564 }
1565 if (hba->netdev->mtu > hba->mtu_supported) {
1566 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1567 hba->netdev->name, hba->netdev->mtu);
1568 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1569 hba->mtu_supported);
1570 goto no_nx2_route;
1571 }
1572 return hba;
1573no_nx2_route:
1574 return NULL;
1575}
1576
1577
1578/**
1579 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1580 * @hba: pointer to adapter instance
1581 * @ep: endpoint (transport identifier) structure
1582 *
1583 * destroys the cm_sock structure and the on-chip iscsi context
1584 */
1585static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1586 struct bnx2i_endpoint *ep)
1587{
1588 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1589 hba->cnic->cm_destroy(ep->cm_sk);
1590
1591 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1592 ep->state = EP_STATE_DISCONN_COMPL;
1593
1594 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1595 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1596 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
1597 " NW/PCIe trace, driver msgs to developers"
1598 " for analysis\n");
1599 return 1;
1600 }
1601
1602 ep->state = EP_STATE_CLEANUP_START;
1603 init_timer(&ep->ofld_timer);
1604 ep->ofld_timer.expires = 10*HZ + jiffies;
1605 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1606 ep->ofld_timer.data = (unsigned long) ep;
1607 add_timer(&ep->ofld_timer);
1608
1609 bnx2i_ep_destroy_list_add(hba, ep);
1610
1611 /* destroy iSCSI context, wait for it to complete */
1612 bnx2i_send_conn_destroy(hba, ep);
1613 wait_event_interruptible(ep->ofld_wait,
1614 (ep->state != EP_STATE_CLEANUP_START));
1615
1616 if (signal_pending(current))
1617 flush_signals(current);
1618 del_timer_sync(&ep->ofld_timer);
1619
1620 bnx2i_ep_destroy_list_del(hba, ep);
1621
1622 if (ep->state != EP_STATE_CLEANUP_CMPL)
1623 /* should never happen */
1624 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1625
1626 return 0;
1627}
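/*
 * Illustrative sketch, not part of this source file: every offload wait in
 * this file arms ofld_timer as a watchdog, posts a request to the CNIC and
 * then sleeps on ofld_wait until the endpoint leaves its *_START state.
 * The real callback, bnx2i_ep_ofld_timer(), is defined alongside this file
 * in bnx2i_hwi.c; assuming the EP_STATE_* names used here, its expected
 * shape is roughly the following, so a lost CNIC completion degrades into
 * a timeout instead of an indefinite sleep.
 */
static void example_ep_ofld_timer(unsigned long data)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;

	if (ep->state == EP_STATE_OFLD_START)		/* offload request timed out */
		ep->state = EP_STATE_OFLD_FAILED;
	else if (ep->state == EP_STATE_DISCONN_START)	/* disconnect timed out */
		ep->state = EP_STATE_DISCONN_TIMEDOUT;
	else if (ep->state == EP_STATE_CLEANUP_START)	/* context destroy timed out */
		ep->state = EP_STATE_CLEANUP_FAILED;

	wake_up_interruptible(&ep->ofld_wait);
}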
1628
1629
1630/**
1631 * bnx2i_ep_connect - establish TCP connection to target portal
1632 * @shost: scsi host
1633 * @dst_addr: target IP address
1634 * @non_blocking: blocking or non-blocking call
1635 *
1636 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
1637 * with l5_core and the CNIC. This is a multi-step process of resolving the
1638 * route to the target, creating an iscsi connection context, handshaking
1639 * with the CNIC module to create/initialize the socket struct, and finally
1640 * sending down the option-2 request to complete the TCP 3-way handshake
1641 */
1642static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1643 struct sockaddr *dst_addr,
1644 int non_blocking)
1645{
1646 u32 iscsi_cid = BNX2I_CID_RESERVED;
1647 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1648 struct sockaddr_in6 *desti6;
1649 struct bnx2i_endpoint *bnx2i_ep;
1650 struct bnx2i_hba *hba;
1651 struct cnic_dev *cnic;
1652 struct cnic_sockaddr saddr;
1653 struct iscsi_endpoint *ep;
1654 int rc = 0;
1655
1656 if (shost)
1657 /* driver is given scsi host to work with */
1658 hba = iscsi_host_priv(shost);
1659 else
1660 /*
1661 * check if the given destination can be reached through
1662		 * an iscsi-capable NetXtreme2 device
1663 */
1664 hba = bnx2i_check_route(dst_addr);
1665 if (!hba) {
1666 rc = -ENOMEM;
1667 goto check_busy;
1668 }
1669
1670 cnic = hba->cnic;
1671 ep = bnx2i_alloc_ep(hba);
1672 if (!ep) {
1673 rc = -ENOMEM;
1674 goto check_busy;
1675 }
1676 bnx2i_ep = ep->dd_data;
1677
1678 mutex_lock(&hba->net_dev_lock);
1679 if (bnx2i_adapter_ready(hba)) {
1680 rc = -EPERM;
1681 goto net_if_down;
1682 }
1683
1684 bnx2i_ep->state = EP_STATE_IDLE;
1685 bnx2i_ep->ep_iscsi_cid = (u16) -1;
1686 bnx2i_ep->num_active_cmds = 0;
1687 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1688 if (iscsi_cid == -1) {
1689 printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
1690 rc = -ENOMEM;
1691 goto iscsi_cid_err;
1692 }
1693 bnx2i_ep->hba_age = hba->age;
1694
1695 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1696 if (rc != 0) {
1697 printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
1698 rc = -ENOMEM;
1699 goto qp_resc_err;
1700 }
1701
1702 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1703 bnx2i_ep->state = EP_STATE_OFLD_START;
1704 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1705
1706 init_timer(&bnx2i_ep->ofld_timer);
1707 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1708 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1709 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1710 add_timer(&bnx2i_ep->ofld_timer);
1711
1712 bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
1713
1714 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1715 wait_event_interruptible(bnx2i_ep->ofld_wait,
1716 bnx2i_ep->state != EP_STATE_OFLD_START);
1717
1718 if (signal_pending(current))
1719 flush_signals(current);
1720 del_timer_sync(&bnx2i_ep->ofld_timer);
1721
1722 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1723
1724 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1725 rc = -ENOSPC;
1726 goto conn_failed;
1727 }
1728
1729 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1730 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1731 if (rc) {
1732 rc = -EINVAL;
1733 goto conn_failed;
1734 }
1735
1736 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1737 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1738 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1739
1740 memset(&saddr, 0, sizeof(saddr));
1741 if (dst_addr->sa_family == AF_INET) {
1742 desti = (struct sockaddr_in *) dst_addr;
1743 saddr.remote.v4 = *desti;
1744 saddr.local.v4.sin_family = desti->sin_family;
1745 } else if (dst_addr->sa_family == AF_INET6) {
1746 desti6 = (struct sockaddr_in6 *) dst_addr;
1747 saddr.remote.v6 = *desti6;
1748 saddr.local.v6.sin6_family = desti6->sin6_family;
1749 }
1750
1751 bnx2i_ep->timestamp = jiffies;
1752 bnx2i_ep->state = EP_STATE_CONNECT_START;
1753 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1754 rc = -EINVAL;
1755 goto conn_failed;
1756 } else
1757 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1758
1759 if (rc)
1760 goto release_ep;
1761
1762 if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1763 goto release_ep;
1764 mutex_unlock(&hba->net_dev_lock);
1765 return ep;
1766
1767release_ep:
1768 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1769 mutex_unlock(&hba->net_dev_lock);
1770 return ERR_PTR(rc);
1771 }
1772conn_failed:
1773net_if_down:
1774iscsi_cid_err:
1775 bnx2i_free_qp_resc(hba, bnx2i_ep);
1776qp_resc_err:
1777 bnx2i_free_ep(ep);
1778 mutex_unlock(&hba->net_dev_lock);
1779check_busy:
1780 bnx2i_unreg_dev_all();
1781 return ERR_PTR(rc);
1782}
1783
1784
1785/**
1786 * bnx2i_ep_poll - polls for TCP connection establishment
1787 * @ep: TCP connection (endpoint) handle
1788 * @timeout_ms: timeout value in milliseconds
1789 *
1790 * polls for TCP connect request to complete
1791 */
1792static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1793{
1794 struct bnx2i_endpoint *bnx2i_ep;
1795 int rc = 0;
1796
1797 bnx2i_ep = ep->dd_data;
1798 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1799 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1800 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1801 return -1;
1802 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1803 return 1;
1804
1805 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1806 ((bnx2i_ep->state ==
1807 EP_STATE_OFLD_FAILED) ||
1808 (bnx2i_ep->state ==
1809 EP_STATE_CONNECT_FAILED) ||
1810 (bnx2i_ep->state ==
1811 EP_STATE_CONNECT_COMPL)),
1812 msecs_to_jiffies(timeout_ms));
1813 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1814 rc = -1;
1815
1816 if (rc > 0)
1817 return 1;
1818 else if (!rc)
1819 return 0; /* timeout */
1820 else
1821 return rc;
1822}
1823
1824
1825/**
1826 * bnx2i_ep_tcp_conn_active - check EP state transition
1827 * @bnx2i_ep: endpoint pointer
1828 *
1829 * check if underlying TCP connection is active
1830 */
1831static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1832{
1833 int ret;
1834 int cnic_dev_10g = 0;
1835
1836 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1837 cnic_dev_10g = 1;
1838
1839 switch (bnx2i_ep->state) {
1840 case EP_STATE_CONNECT_START:
1841 case EP_STATE_CLEANUP_FAILED:
1842 case EP_STATE_OFLD_FAILED:
1843 case EP_STATE_DISCONN_TIMEDOUT:
1844 ret = 0;
1845 break;
1846 case EP_STATE_CONNECT_COMPL:
1847 case EP_STATE_ULP_UPDATE_START:
1848 case EP_STATE_ULP_UPDATE_COMPL:
1849 case EP_STATE_TCP_FIN_RCVD:
1850 case EP_STATE_ULP_UPDATE_FAILED:
1851 ret = 1;
1852 break;
1853 case EP_STATE_TCP_RST_RCVD:
1854 ret = 0;
1855 break;
1856 case EP_STATE_CONNECT_FAILED:
1857 if (cnic_dev_10g)
1858 ret = 1;
1859 else
1860 ret = 0;
1861 break;
1862 default:
1863 ret = 0;
1864 }
1865
1866 return ret;
1867}
1868
1869
1870/**
1871 * bnx2i_ep_disconnect - executes TCP connection teardown process
1872 * @ep: TCP connection (endpoint) handle
1873 *
1874 * executes TCP connection teardown process
1875 */
1876static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1877{
1878 struct bnx2i_endpoint *bnx2i_ep;
1879 struct bnx2i_conn *bnx2i_conn = NULL;
1880 struct iscsi_session *session = NULL;
1881 struct iscsi_conn *conn;
1882 struct cnic_dev *cnic;
1883 struct bnx2i_hba *hba;
1884
1885 bnx2i_ep = ep->dd_data;
1886
1887	/* driver should not attempt connection cleanup until TCP_CONNECT
1888	 * either completes successfully or fails. The timeout is 9 secs, so
1889	 * wait for it to complete
1890 */
1891 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
1892 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
1893 msleep(250);
1894
1895 if (bnx2i_ep->conn) {
1896 bnx2i_conn = bnx2i_ep->conn;
1897 conn = bnx2i_conn->cls_conn->dd_data;
1898 session = conn->session;
1899
1900 spin_lock_bh(&session->lock);
1901 bnx2i_conn->is_bound = 0;
1902 spin_unlock_bh(&session->lock);
1903 }
1904
1905 hba = bnx2i_ep->hba;
1906 if (bnx2i_ep->state == EP_STATE_IDLE)
1907 goto return_bnx2i_ep;
1908 cnic = hba->cnic;
1909
1910 mutex_lock(&hba->net_dev_lock);
1911
1912 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1913 goto free_resc;
1914 if (bnx2i_ep->hba_age != hba->age)
1915 goto free_resc;
1916
1917 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1918 goto destory_conn;
1919
1920 bnx2i_ep->state = EP_STATE_DISCONN_START;
1921
1922 init_timer(&bnx2i_ep->ofld_timer);
1923 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1924 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1925 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1926 add_timer(&bnx2i_ep->ofld_timer);
1927
1928 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1929 int close = 0;
1930
1931 if (session) {
1932 spin_lock_bh(&session->lock);
1933 if (session->state == ISCSI_STATE_LOGGING_OUT)
1934 close = 1;
1935 spin_unlock_bh(&session->lock);
1936 }
1937 if (close)
1938 cnic->cm_close(bnx2i_ep->cm_sk);
1939 else
1940 cnic->cm_abort(bnx2i_ep->cm_sk);
1941 } else
1942 goto free_resc;
1943
1944 /* wait for option-2 conn teardown */
1945 wait_event_interruptible(bnx2i_ep->ofld_wait,
1946 bnx2i_ep->state != EP_STATE_DISCONN_START);
1947
1948 if (signal_pending(current))
1949 flush_signals(current);
1950 del_timer_sync(&bnx2i_ep->ofld_timer);
1951
1952destory_conn:
1953 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1954 mutex_unlock(&hba->net_dev_lock);
1955 return;
1956 }
1957free_resc:
1958 mutex_unlock(&hba->net_dev_lock);
1959 bnx2i_free_qp_resc(hba, bnx2i_ep);
1960return_bnx2i_ep:
1961 if (bnx2i_conn)
1962 bnx2i_conn->ep = NULL;
1963
1964 bnx2i_free_ep(ep);
1965
1966 if (!hba->ofld_conns_active)
1967 bnx2i_unreg_dev_all();
1968}
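/*
 * Illustrative sketch, not part of this source file: the ep_connect,
 * ep_poll and ep_disconnect handlers above are invoked by
 * scsi_transport_iscsi on behalf of the iscsid daemon.  Ignoring the
 * netlink plumbing, the calling sequence is roughly:
 */
static struct iscsi_endpoint *example_offload_connect(struct Scsi_Host *shost,
						      struct sockaddr *dst_addr)
{
	/* start the option-2 TCP connect through the NetXtreme2 device */
	struct iscsi_endpoint *ep = bnx2i_ep_connect(shost, dst_addr, 1);
	int rc;

	if (IS_ERR(ep))
		return ep;

	/* 1 = connected, 0 = still pending after timeout, < 0 = failed */
	rc = bnx2i_ep_poll(ep, 1000);
	if (rc <= 0) {
		bnx2i_ep_disconnect(ep);	/* tears down socket and context */
		return ERR_PTR(rc ? rc : -ETIMEDOUT);
	}

	/* userspace follows up with bind_conn to attach this ep to a cls_conn */
	return ep;
}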
1969
1970
1971/**
1972 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
1973 * @shost: scsi host pointer
1974 * @params: iscsi path message to hand to the cnic driver
1975 */
1976static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
1977{
1978 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1979 char *buf = (char *) params;
1980 u16 len = sizeof(*params);
1981
1982 /* handled by cnic driver */
1983 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
1984 len);
1985
1986 return 0;
1987}
1988
1989
1990/*
1991 * 'scsi_host_template' and 'iscsi_transport' structure templates used
1992 * while registering with the SCSI midlayer and iSCSI transport module.
1993 */
1994static struct scsi_host_template bnx2i_host_template = {
1995 .module = THIS_MODULE,
1996 .name = "Broadcom Offload iSCSI Initiator",
1997 .proc_name = "bnx2i",
1998 .queuecommand = iscsi_queuecommand,
1999 .eh_abort_handler = iscsi_eh_abort,
2000 .eh_device_reset_handler = iscsi_eh_device_reset,
2001 .eh_target_reset_handler = iscsi_eh_target_reset,
2002 .can_queue = 1024,
2003 .max_sectors = 127,
2004 .cmd_per_lun = 32,
2005 .this_id = -1,
2006 .use_clustering = ENABLE_CLUSTERING,
2007 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2008 .shost_attrs = bnx2i_dev_attributes,
2009};
2010
2011struct iscsi_transport bnx2i_iscsi_transport = {
2012 .owner = THIS_MODULE,
2013 .name = "bnx2i",
2014 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2015 CAP_MULTI_R2T | CAP_DATADGST |
2016 CAP_DATA_PATH_OFFLOAD,
2017 .param_mask = ISCSI_MAX_RECV_DLENGTH |
2018 ISCSI_MAX_XMIT_DLENGTH |
2019 ISCSI_HDRDGST_EN |
2020 ISCSI_DATADGST_EN |
2021 ISCSI_INITIAL_R2T_EN |
2022 ISCSI_MAX_R2T |
2023 ISCSI_IMM_DATA_EN |
2024 ISCSI_FIRST_BURST |
2025 ISCSI_MAX_BURST |
2026 ISCSI_PDU_INORDER_EN |
2027 ISCSI_DATASEQ_INORDER_EN |
2028 ISCSI_ERL |
2029 ISCSI_CONN_PORT |
2030 ISCSI_CONN_ADDRESS |
2031 ISCSI_EXP_STATSN |
2032 ISCSI_PERSISTENT_PORT |
2033 ISCSI_PERSISTENT_ADDRESS |
2034 ISCSI_TARGET_NAME | ISCSI_TPGT |
2035 ISCSI_USERNAME | ISCSI_PASSWORD |
2036 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2037 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2038 ISCSI_LU_RESET_TMO |
2039 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
2042 .create_session = bnx2i_session_create,
2043 .destroy_session = bnx2i_session_destroy,
2044 .create_conn = bnx2i_conn_create,
2045 .bind_conn = bnx2i_conn_bind,
2046 .destroy_conn = bnx2i_conn_destroy,
2047 .set_param = iscsi_set_param,
2048 .get_conn_param = bnx2i_conn_get_param,
2049 .get_session_param = iscsi_session_get_param,
2050 .get_host_param = bnx2i_host_get_param,
2051 .start_conn = bnx2i_conn_start,
2052 .stop_conn = iscsi_conn_stop,
2053 .send_pdu = iscsi_conn_send_pdu,
2054 .xmit_task = bnx2i_task_xmit,
2055 .get_stats = bnx2i_conn_get_stats,
2056 /* TCP connect - disconnect - option-2 interface calls */
2057 .ep_connect = bnx2i_ep_connect,
2058 .ep_poll = bnx2i_ep_poll,
2059 .ep_disconnect = bnx2i_ep_disconnect,
2060 .set_path = bnx2i_nl_set_path,
2061 /* Error recovery timeout call */
2062 .session_recovery_timedout = iscsi_session_recovery_timedout,
2063 .cleanup_task = bnx2i_cleanup_task,
2064};
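The transport template above only takes effect once it is registered with the iSCSI transport class; that happens in the driver's module init path (bnx2i_init.c in this same series). A minimal, hedged sketch of the registration and teardown, assuming the standard libiscsi entry points (the real init path also sets up the cnic interface and adapter bookkeeping):

static struct scsi_transport_template *bnx2i_scsi_xport_template;

static int __init example_bnx2i_init(void)
{
	bnx2i_scsi_xport_template =
			iscsi_register_transport(&bnx2i_iscsi_transport);
	if (!bnx2i_scsi_xport_template) {
		printk(KERN_ERR "bnx2i: iscsi transport registration failed\n");
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_bnx2i_exit(void)
{
	iscsi_unregister_transport(&bnx2i_iscsi_transport);
}

module_init(example_bnx2i_init);
module_exit(example_bnx2i_exit);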
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 00000000000..96426b751eb
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11
12#include "bnx2i.h"
13
14/**
15 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
16 * @dev: device pointer
17 *
18 * Map device to hba structure
19 */
20static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
21{
22 struct Scsi_Host *shost = class_to_shost(dev);
23 return iscsi_host_priv(shost);
24}
25
26
27/**
28 * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
29 * @dev: device pointer
30 * @buf: buffer to return current SQ size parameter
31 *
32 * Returns the current SQ size parameter; this parameter determines the
33 * number of outstanding iSCSI commands supported on a connection
34 */
35static ssize_t bnx2i_show_sq_info(struct device *dev,
36 struct device_attribute *attr, char *buf)
37{
38 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
39
40 return sprintf(buf, "0x%x\n", hba->max_sqes);
41}
42
43
44/**
45 * bnx2i_set_sq_info - update send queue (SQ) size parameter
46 * @dev: device pointer
47 * @buf: buffer containing the new SQ size parameter
48 * @count: parameter buffer size
49 *
50 * Interface for user to change the send queue (SQ) size allocated for each conn
51 * Must be within SQ limits and a power of 2. For the latter this is needed
52 * because of how libiscsi preallocates tasks.
53 */
54static ssize_t bnx2i_set_sq_info(struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{
58 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
59 u32 val;
60 int max_sq_size;
61
62 if (hba->ofld_conns_active)
63 goto skip_config;
64
65 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
66 max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
67 else
68 max_sq_size = BNX2I_570X_SQ_WQES_MAX;
69
70 if (sscanf(buf, " 0x%x ", &val) > 0) {
71 if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
72 (is_power_of_2(val)))
73 hba->max_sqes = val;
74 }
75
76 return count;
77
78skip_config:
79 printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
80 return 0;
81}
82
83
84/**
85 * bnx2i_show_ccell_info - returns command cell (HQ) size
86 * @dev: device pointer
87 * @buf: buffer to return current CCELL (HQ) size parameter
88 *
89 * returns per-connection TCP history queue size parameter
90 */
91static ssize_t bnx2i_show_ccell_info(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
95
96 return sprintf(buf, "0x%x\n", hba->num_ccell);
97}
98
99
100/**
101 * bnx2i_set_ccell_info - set command cell (HQ) size
102 * @dev: device pointer
103 * @buf: buffer containing the new CCELL (HQ) size parameter
104 * @count: parameter buffer size
105 *
106 * updates per-connection TCP history queue size parameter
107 */
108static ssize_t bnx2i_set_ccell_info(struct device *dev,
109 struct device_attribute *attr,
110 const char *buf, size_t count)
111{
112 u32 val;
113 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
114
115 if (hba->ofld_conns_active)
116 goto skip_config;
117
118 if (sscanf(buf, " 0x%x ", &val) > 0) {
119 if ((val >= BNX2I_CCELLS_MIN) &&
120 (val <= BNX2I_CCELLS_MAX)) {
121 hba->num_ccell = val;
122 }
123 }
124
125 return count;
126
127skip_config:
128 printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
129 return 0;
130}
131
132
133static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
134 bnx2i_show_sq_info, bnx2i_set_sq_info);
135static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
136 bnx2i_show_ccell_info, bnx2i_set_ccell_info);
137
138struct device_attribute *bnx2i_dev_attributes[] = {
139 &dev_attr_sq_size,
140 &dev_attr_num_ccell,
141 NULL
142};
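Both attributes are created under the iSCSI host's sysfs directory through the shost_attrs member of the host template, i.e. /sys/class/scsi_host/hostN/sq_size and .../num_ccell, and both parse hexadecimal input. A hedged userspace sketch of resizing the SQ before any connections are offloaded (the host number is an assumption, and the value must be a power of two within the driver's SQ limits):

#include <stdio.h>

int main(void)
{
	/* "host2" is only an example; pick the bnx2i host on your system */
	FILE *f = fopen("/sys/class/scsi_host/host2/sq_size", "w");

	if (!f) {
		perror("sq_size");
		return 1;
	}
	fprintf(f, "0x200\n");	/* power of 2, parsed by sscanf(" 0x%x ") */
	return fclose(f) ? 1 : 0;
}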
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d1..e3133b58e59 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *); 144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *); 145void cxgb3i_adapter_close(struct t3cdev *);
146 146
147struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
148struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, 147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
149 struct net_device *); 148 struct net_device *);
150void cxgb3i_hba_host_remove(struct cxgb3i_hba *); 149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b1..74369a3f963 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <net/dst.h>
16#include <net/tcp.h> 17#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h> 19#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
178 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device 179 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
179 * @t3dev: t3cdev adapter 180 * @t3dev: t3cdev adapter
180 */ 181 */
181struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 182static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
182{ 183{
183 struct cxgb3i_adapter *snic; 184 struct cxgb3i_adapter *snic;
184 int i; 185 int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
261 262
262/** 263/**
263 * cxgb3i_ep_connect - establish TCP connection to target portal 264 * cxgb3i_ep_connect - establish TCP connection to target portal
265 * @shost: scsi host to use
264 * @dst_addr: target IP address 266 * @dst_addr: target IP address
265 * @non_blocking: blocking or non-blocking call 267 * @non_blocking: blocking or non-blocking call
266 * 268 *
267 * Initiates a TCP/IP connection to the dst_addr 269 * Initiates a TCP/IP connection to the dst_addr
268 */ 270 */
269static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, 271static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
272 struct sockaddr *dst_addr,
270 int non_blocking) 273 int non_blocking)
271{ 274{
272 struct iscsi_endpoint *ep; 275 struct iscsi_endpoint *ep;
273 struct cxgb3i_endpoint *cep; 276 struct cxgb3i_endpoint *cep;
274 struct cxgb3i_hba *hba; 277 struct cxgb3i_hba *hba = NULL;
275 struct s3_conn *c3cn = NULL; 278 struct s3_conn *c3cn = NULL;
276 int err = 0; 279 int err = 0;
277 280
281 if (shost)
282 hba = iscsi_host_priv(shost);
283
284 cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
285
278 c3cn = cxgb3i_c3cn_create(); 286 c3cn = cxgb3i_c3cn_create();
279 if (!c3cn) { 287 if (!c3cn) {
280 cxgb3i_log_info("ep connect OOM.\n"); 288 cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
282 goto release_conn; 290 goto release_conn;
283 } 291 }
284 292
285 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); 293 err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
294 (struct sockaddr_in *)dst_addr);
286 if (err < 0) { 295 if (err < 0) {
287 cxgb3i_log_info("ep connect failed.\n"); 296 cxgb3i_log_info("ep connect failed.\n");
288 goto release_conn; 297 goto release_conn;
289 } 298 }
299
290 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); 300 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
291 if (!hba) { 301 if (!hba) {
292 err = -ENOSPC; 302 err = -ENOSPC;
293 cxgb3i_log_info("NOT going through cxgbi device.\n"); 303 cxgb3i_log_info("NOT going through cxgbi device.\n");
294 goto release_conn; 304 goto release_conn;
295 } 305 }
306
307 if (shost && hba != iscsi_host_priv(shost)) {
308 err = -ENOSPC;
309 cxgb3i_log_info("Could not connect through request host%u\n",
310 shost->host_no);
311 goto release_conn;
312 }
313
296 if (c3cn_is_closing(c3cn)) { 314 if (c3cn_is_closing(c3cn)) {
297 err = -ENOSPC; 315 err = -ENOSPC;
298 cxgb3i_log_info("ep connect unable to connect.\n"); 316 cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f3..c1d5be4adf9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1479 return NULL; 1479 return NULL;
1480} 1480}
1481 1481
1482static struct rtable *find_route(__be32 saddr, __be32 daddr, 1482static struct rtable *find_route(struct net_device *dev,
1483 __be32 saddr, __be32 daddr,
1483 __be16 sport, __be16 dport) 1484 __be16 sport, __be16 dport)
1484{ 1485{
1485 struct rtable *rt; 1486 struct rtable *rt;
1486 struct flowi fl = { 1487 struct flowi fl = {
1487 .oif = 0, 1488 .oif = dev ? dev->ifindex : 0,
1488 .nl_u = { 1489 .nl_u = {
1489 .ip4_u = { 1490 .ip4_u = {
1490 .daddr = daddr, 1491 .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
1573 * 1574 *
1574 * return 0 if active open request is sent, < 0 otherwise. 1575 * return 0 if active open request is sent, < 0 otherwise.
1575 */ 1576 */
1576int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) 1577int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1578 struct sockaddr_in *usin)
1577{ 1579{
1578 struct rtable *rt; 1580 struct rtable *rt;
1579 struct net_device *dev;
1580 struct cxgb3i_sdev_data *cdata; 1581 struct cxgb3i_sdev_data *cdata;
1581 struct t3cdev *cdev; 1582 struct t3cdev *cdev;
1582 __be32 sipv4; 1583 __be32 sipv4;
1583 int err; 1584 int err;
1584 1585
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1587
1585 if (usin->sin_family != AF_INET) 1588 if (usin->sin_family != AF_INET)
1586 return -EAFNOSUPPORT; 1589 return -EAFNOSUPPORT;
1587 1590
1588 c3cn->daddr.sin_port = usin->sin_port; 1591 c3cn->daddr.sin_port = usin->sin_port;
1589 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1590 1593
1591 rt = find_route(c3cn->saddr.sin_addr.s_addr, 1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1592 c3cn->daddr.sin_addr.s_addr, 1595 c3cn->daddr.sin_addr.s_addr,
1593 c3cn->saddr.sin_port, 1596 c3cn->saddr.sin_port,
1594 c3cn->daddr.sin_port); 1597 c3cn->daddr.sin_port);
1595 if (rt == NULL) { 1598 if (rt == NULL) {
1596 c3cn_conn_debug("NO route to 0x%x, port %u.\n", 1599 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1597 c3cn->daddr.sin_addr.s_addr, 1600 c3cn->daddr.sin_addr.s_addr,
1598 ntohs(c3cn->daddr.sin_port)); 1601 ntohs(c3cn->daddr.sin_port),
1602 dev ? dev->name : "any");
1599 return -ENETUNREACH; 1603 return -ENETUNREACH;
1600 } 1604 }
1601 1605
1602 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 1606 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1603 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", 1607 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1604 c3cn->daddr.sin_addr.s_addr, 1608 c3cn->daddr.sin_addr.s_addr,
1605 ntohs(c3cn->daddr.sin_port)); 1609 ntohs(c3cn->daddr.sin_port),
1610 dev ? dev->name : "any");
1606 ip_rt_put(rt); 1611 ip_rt_put(rt);
1607 return -ENETUNREACH; 1612 return -ENETUNREACH;
1608 } 1613 }
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a..6a1d86b1faf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *); 169void cxgb3i_sdev_remove(struct t3cdev *);
170 170
171struct s3_conn *cxgb3i_c3cn_create(void); 171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); 172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
173void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); 174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
174int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); 175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
175void cxgb3i_c3cn_release(struct s3_conn *); 176void cxgb3i_c3cn_release(struct s3_conn *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d..fd0544f7da8 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
561 struct rdac_dh_data *h = get_rdac_data(sdev); 561 struct rdac_dh_data *h = get_rdac_data(sdev);
562 switch (sense_hdr->sense_key) { 562 switch (sense_hdr->sense_key) {
563 case NOT_READY: 563 case NOT_READY:
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
565 /* LUN Not Ready - Logical Unit Not Ready and is in
566 * the process of becoming ready
567 * Just retry.
568 */
569 return ADD_TO_MLQUEUE;
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) 570 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
565 /* LUN Not Ready - Storage firmware incompatible 571 /* LUN Not Ready - Storage firmware incompatible
566 * Manual code synchonisation required. 572 * Manual code synchonisation required.
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
index 4b56c0436ba..b2613c2eaac 100644
--- a/drivers/scsi/dpt/osd_util.h
+++ b/drivers/scsi/dpt/osd_util.h
@@ -342,7 +342,7 @@ uLONG osdGetThreadID(void);
342/* wakes up the specifed thread */ 342/* wakes up the specifed thread */
343void osdWakeThread(uLONG); 343void osdWakeThread(uLONG);
344 344
345/* osd sleep for x miliseconds */ 345/* osd sleep for x milliseconds */
346void osdSleep(uLONG); 346void osdSleep(uLONG);
347 347
348#define DPT_THREAD_PRIORITY_LOWEST 0x00 348#define DPT_THREAD_PRIORITY_LOWEST 0x00
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b..c7076ce25e2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
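The eata.c hunks above are mechanical fallout of the block layer hiding request geometry behind accessors: blk_rq_pos() reports the starting sector (what ->sector used to be) and blk_rq_sectors() the remaining size in 512-byte sectors (formerly ->nr_sectors). A small hedged illustration of how the converted code now derives the same numbers (the accessors themselves live in include/linux/blkdev.h):

#include <linux/blkdev.h>

/* Illustration: last sector touched by a request, computed the way the
 * reordering heuristics above now do it. */
static sector_t example_rq_end_sector(struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}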
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f791348871f..c15878e8815 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
54/* fcoe host list */ 54/* fcoe host list */
55LIST_HEAD(fcoe_hostlist); 55LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock); 56DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 57DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59 58
60/* Function Prototypes */ 59/* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *); 70static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *); 71static int fcoe_hostlist_remove(const struct fc_lport *);
73 72
74static int fcoe_check_wait_queue(struct fc_lport *); 73static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *); 74static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void); 75static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void); 76static void fcoe_dev_cleanup(void);
@@ -198,6 +197,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
198 lp->link_up = 0; 197 lp->link_up = 0;
199 lp->qfull = 0; 198 lp->qfull = 0;
200 lp->max_retry_count = 3; 199 lp->max_retry_count = 3;
200 lp->max_rport_retry_count = 3;
201 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 201 lp->e_d_tov = 2 * 1000; /* FC-FS default */
202 lp->r_a_tov = 2 * 2 * 1000; 202 lp->r_a_tov = 2 * 2 * 1000;
203 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 203 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -243,6 +243,18 @@ void fcoe_netdev_cleanup(struct fcoe_softc *fc)
243} 243}
244 244
245/** 245/**
246 * fcoe_queue_timer() - fcoe queue timer
247 * @lp: the fc_lport pointer
248 *
249 * Calls fcoe_check_wait_queue on timeout
250 *
251 */
252static void fcoe_queue_timer(ulong lp)
253{
254 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
255}
256
257/**
246 * fcoe_netdev_config() - Set up netdev for SW FCoE 258 * fcoe_netdev_config() - Set up netdev for SW FCoE
247 * @lp : ptr to the fc_lport 259 * @lp : ptr to the fc_lport
248 * @netdev : ptr to the associated netdevice struct 260 * @netdev : ptr to the associated netdevice struct
@@ -313,6 +325,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
313 } 325 }
314 skb_queue_head_init(&fc->fcoe_pending_queue); 326 skb_queue_head_init(&fc->fcoe_pending_queue);
315 fc->fcoe_pending_queue_active = 0; 327 fc->fcoe_pending_queue_active = 0;
328 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
316 329
317 /* look for SAN MAC address, if multiple SAN MACs exist, only 330 /* look for SAN MAC address, if multiple SAN MACs exist, only
318 * use the first one for SPMA */ 331 * use the first one for SPMA */
@@ -474,6 +487,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
474 /* Free existing skbs */ 487 /* Free existing skbs */
475 fcoe_clean_pending_queue(lp); 488 fcoe_clean_pending_queue(lp);
476 489
490 /* Stop the timer */
491 del_timer_sync(&fc->timer);
492
477 /* Free memory used by statistical counters */ 493 /* Free memory used by statistical counters */
478 fc_lport_free_stats(lp); 494 fc_lport_free_stats(lp);
479 495
@@ -1021,7 +1037,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
1021 */ 1037 */
1022int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1038int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1023{ 1039{
1024 int wlen, rc = 0; 1040 int wlen;
1025 u32 crc; 1041 u32 crc;
1026 struct ethhdr *eh; 1042 struct ethhdr *eh;
1027 struct fcoe_crc_eof *cp; 1043 struct fcoe_crc_eof *cp;
@@ -1054,8 +1070,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1054 sof = fr_sof(fp); 1070 sof = fr_sof(fp);
1055 eof = fr_eof(fp); 1071 eof = fr_eof(fp);
1056 1072
1057 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 1073 elen = sizeof(struct ethhdr);
1058 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1059 hlen = sizeof(struct fcoe_hdr); 1074 hlen = sizeof(struct fcoe_hdr);
1060 tlen = sizeof(struct fcoe_crc_eof); 1075 tlen = sizeof(struct fcoe_crc_eof);
1061 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1076 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1140,18 +1155,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1140 /* send down to lld */ 1155 /* send down to lld */
1141 fr_dev(fp) = lp; 1156 fr_dev(fp) = lp;
1142 if (fc->fcoe_pending_queue.qlen) 1157 if (fc->fcoe_pending_queue.qlen)
1143 rc = fcoe_check_wait_queue(lp); 1158 fcoe_check_wait_queue(lp, skb);
1144 1159 else if (fcoe_start_io(skb))
1145 if (rc == 0) 1160 fcoe_check_wait_queue(lp, skb);
1146 rc = fcoe_start_io(skb);
1147
1148 if (rc) {
1149 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1150 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1151 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1152 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1153 lp->qfull = 1;
1154 }
1155 1161
1156 return 0; 1162 return 0;
1157} 1163}
@@ -1301,32 +1307,6 @@ int fcoe_percpu_receive_thread(void *arg)
1301} 1307}
1302 1308
1303/** 1309/**
1304 * fcoe_watchdog() - fcoe timer callback
1305 * @vp:
1306 *
1307 * This checks the pending queue length for fcoe and set lport qfull
1308 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1309 * fcoe_hostlist.
1310 *
1311 * Returns: 0 for success
1312 */
1313void fcoe_watchdog(ulong vp)
1314{
1315 struct fcoe_softc *fc;
1316
1317 read_lock(&fcoe_hostlist_lock);
1318 list_for_each_entry(fc, &fcoe_hostlist, list) {
1319 if (fc->ctlr.lp)
1320 fcoe_check_wait_queue(fc->ctlr.lp);
1321 }
1322 read_unlock(&fcoe_hostlist_lock);
1323
1324 fcoe_timer.expires = jiffies + (1 * HZ);
1325 add_timer(&fcoe_timer);
1326}
1327
1328
1329/**
1330 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1310 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1331 * @lp: the fc_lport 1311 * @lp: the fc_lport
1332 * 1312 *
@@ -1338,16 +1318,17 @@ void fcoe_watchdog(ulong vp)
1338 * The wait_queue is used when the skb transmit fails. skb will go 1318 * The wait_queue is used when the skb transmit fails. skb will go
1339 * in the wait_queue which will be emptied by the timer function or 1319 * in the wait_queue which will be emptied by the timer function or
1340 * by the next skb transmit. 1320 * by the next skb transmit.
1341 *
1342 * Returns: 0 for success
1343 */ 1321 */
1344static int fcoe_check_wait_queue(struct fc_lport *lp) 1322static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1345{ 1323{
1346 struct fcoe_softc *fc = lport_priv(lp); 1324 struct fcoe_softc *fc = lport_priv(lp);
1347 struct sk_buff *skb; 1325 int rc;
1348 int rc = -1;
1349 1326
1350 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1327 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1328
1329 if (skb)
1330 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1331
1351 if (fc->fcoe_pending_queue_active) 1332 if (fc->fcoe_pending_queue_active)
1352 goto out; 1333 goto out;
1353 fc->fcoe_pending_queue_active = 1; 1334 fc->fcoe_pending_queue_active = 1;
@@ -1373,23 +1354,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
1373 1354
1374 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1355 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1375 lp->qfull = 0; 1356 lp->qfull = 0;
1357 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1358 mod_timer(&fc->timer, jiffies + 2);
1376 fc->fcoe_pending_queue_active = 0; 1359 fc->fcoe_pending_queue_active = 0;
1377 rc = fc->fcoe_pending_queue.qlen;
1378out: 1360out:
1361 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1362 lp->qfull = 1;
1379 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1363 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1380 return rc; 1364 return;
1381} 1365}
1382 1366
1383/** 1367/**
1384 * fcoe_dev_setup() - setup link change notification interface 1368 * fcoe_dev_setup() - setup link change notification interface
1385 */ 1369 */
1386static void fcoe_dev_setup() 1370static void fcoe_dev_setup(void)
1387{ 1371{
1388 register_netdevice_notifier(&fcoe_notifier); 1372 register_netdevice_notifier(&fcoe_notifier);
1389} 1373}
1390 1374
1391/** 1375/**
1392 * fcoe_dev_setup() - cleanup link change notification interface 1376 * fcoe_dev_cleanup() - cleanup link change notification interface
1393 */ 1377 */
1394static void fcoe_dev_cleanup(void) 1378static void fcoe_dev_cleanup(void)
1395{ 1379{
@@ -1848,10 +1832,6 @@ static int __init fcoe_init(void)
1848 /* Setup link change notification */ 1832 /* Setup link change notification */
1849 fcoe_dev_setup(); 1833 fcoe_dev_setup();
1850 1834
1851 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1852
1853 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1854
1855 fcoe_if_init(); 1835 fcoe_if_init();
1856 1836
1857 return 0; 1837 return 0;
@@ -1877,9 +1857,6 @@ static void __exit fcoe_exit(void)
1877 1857
1878 fcoe_dev_cleanup(); 1858 fcoe_dev_cleanup();
1879 1859
1880 /* Stop the timer */
1881 del_timer_sync(&fcoe_timer);
1882
1883 /* releases the associated fcoe hosts */ 1860 /* releases the associated fcoe hosts */
1884 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1861 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1885 fcoe_if_destroy(fc->real_dev); 1862 fcoe_if_destroy(fc->real_dev);
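The net effect of the fcoe.c changes is that transmit backpressure is handled in one place: fcoe_xmit() no longer queues skbs or toggles qfull itself; anything it cannot send immediately is handed to fcoe_check_wait_queue(), which owns the pending-queue lock, the qfull flag and the new per-lport retry timer. A condensed, hedged sketch of the reworked tail of the transmit path, using the names from the diff above:

/* The short-circuit mirrors the diff: a backlogged lport never bypasses the
 * queue, and a failed immediate send falls back to the queue/timer path. */
static void example_fcoe_xmit_tail(struct fc_lport *lp, struct sk_buff *skb)
{
	struct fcoe_softc *fc = lport_priv(lp);

	if (fc->fcoe_pending_queue.qlen || fcoe_start_io(skb))
		fcoe_check_wait_queue(lp, skb);	/* queues skb, arms fc->timer */
}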
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae88689..a1eb8c1988b 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -61,6 +61,7 @@ struct fcoe_softc {
61 struct packet_type fip_packet_type; 61 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue; 62 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active; 63 u8 fcoe_pending_queue_active;
64 struct timer_list timer; /* queue timer */
64 struct fcoe_ctlr ctlr; 65 struct fcoe_ctlr ctlr;
65}; 66};
66 67
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index b9aa280100b..2f5bc7fd3fa 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -215,7 +215,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
215 sol->desc.size.fd_size = htons(fcoe_size); 215 sol->desc.size.fd_size = htons(fcoe_size);
216 216
217 skb_put(skb, sizeof(*sol)); 217 skb_put(skb, sizeof(*sol));
218 skb->protocol = htons(ETH_P_802_3); 218 skb->protocol = htons(ETH_P_FIP);
219 skb_reset_mac_header(skb); 219 skb_reset_mac_header(skb);
220 skb_reset_network_header(skb); 220 skb_reset_network_header(skb);
221 fip->send(fip, skb); 221 fip->send(fip, skb);
@@ -369,7 +369,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
369 } 369 }
370 370
371 skb_put(skb, len); 371 skb_put(skb, len);
372 skb->protocol = htons(ETH_P_802_3); 372 skb->protocol = htons(ETH_P_FIP);
373 skb_reset_mac_header(skb); 373 skb_reset_mac_header(skb);
374 skb_reset_network_header(skb); 374 skb_reset_network_header(skb);
375 fip->send(fip, skb); 375 fip->send(fip, skb);
@@ -432,7 +432,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
432 else if (fip->spma) 432 else if (fip->spma)
433 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); 433 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
434 434
435 skb->protocol = htons(ETH_P_802_3); 435 skb->protocol = htons(ETH_P_FIP);
436 skb_reset_mac_header(skb); 436 skb_reset_mac_header(skb);
437 skb_reset_network_header(skb); 437 skb_reset_network_header(skb);
438 return 0; 438 return 0;
@@ -455,14 +455,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
455 u16 old_xid; 455 u16 old_xid;
456 u8 op; 456 u8 op;
457 457
458 if (fip->state == FIP_ST_NON_FIP)
459 return 0;
460
461 fh = (struct fc_frame_header *)skb->data; 458 fh = (struct fc_frame_header *)skb->data;
462 op = *(u8 *)(fh + 1); 459 op = *(u8 *)(fh + 1);
463 460
464 switch (op) { 461 if (op == ELS_FLOGI) {
465 case ELS_FLOGI:
466 old_xid = fip->flogi_oxid; 462 old_xid = fip->flogi_oxid;
467 fip->flogi_oxid = ntohs(fh->fh_ox_id); 463 fip->flogi_oxid = ntohs(fh->fh_ox_id);
468 if (fip->state == FIP_ST_AUTO) { 464 if (fip->state == FIP_ST_AUTO) {
@@ -474,6 +470,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
474 fip->map_dest = 1; 470 fip->map_dest = 1;
475 return 0; 471 return 0;
476 } 472 }
473 if (fip->state == FIP_ST_NON_FIP)
474 fip->map_dest = 1;
475 }
476
477 if (fip->state == FIP_ST_NON_FIP)
478 return 0;
479
480 switch (op) {
481 case ELS_FLOGI:
477 op = FIP_DT_FLOGI; 482 op = FIP_DT_FLOGI;
478 break; 483 break;
479 case ELS_FDISC: 484 case ELS_FDISC:
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 00000000000..37c3440bc17
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_FCOE_FNIC) += fnic.o
2
3fnic-y := \
4 fnic_attrs.o \
5 fnic_isr.o \
6 fnic_main.o \
7 fnic_res.o \
8 fnic_fcs.o \
9 fnic_scsi.o \
10 vnic_cq.o \
11 vnic_dev.o \
12 vnic_intr.o \
13 vnic_rq.o \
14 vnic_wq_copy.o \
15 vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 00000000000..d1225cf6320
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_DESC_H_
19#define _CQ_DESC_H_
20
21/*
22 * Completion queue descriptor types
23 */
24enum cq_desc_types {
25 CQ_DESC_TYPE_WQ_ENET = 0,
26 CQ_DESC_TYPE_DESC_COPY = 1,
27 CQ_DESC_TYPE_WQ_EXCH = 2,
28 CQ_DESC_TYPE_RQ_ENET = 3,
29 CQ_DESC_TYPE_RQ_FCP = 4,
30};
31
32/* Completion queue descriptor: 16B
33 *
34 * All completion queues have this basic layout. The
35 * type_specfic area is unique for each completion
36 * queue type.
37 */
38struct cq_desc {
39 __le16 completed_index;
40 __le16 q_number;
41 u8 type_specfic[11];
42 u8 type_color;
43};
44
45#define CQ_DESC_TYPE_BITS 4
46#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
47#define CQ_DESC_COLOR_MASK 1
48#define CQ_DESC_COLOR_SHIFT 7
49#define CQ_DESC_Q_NUM_BITS 10
50#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
51#define CQ_DESC_COMP_NDX_BITS 12
52#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
53
54static inline void cq_desc_dec(const struct cq_desc *desc_arg,
55 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
56{
57 const struct cq_desc *desc = desc_arg;
58 const u8 type_color = desc->type_color;
59
60 *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
61
62 /*
63 * Make sure color bit is read from desc *before* other fields
64 * are read from desc. Hardware guarantees color bit is last
65 * bit (byte) written. Adding the rmb() prevents the compiler
66 * and/or CPU from reordering the reads which would potentially
67 * result in reading stale values.
68 */
69
70 rmb();
71
72 *type = type_color & CQ_DESC_TYPE_MASK;
73 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
74 *completed_index = le16_to_cpu(desc->completed_index) &
75 CQ_DESC_COMP_NDX_MASK;
76}
77
78#endif /* _CQ_DESC_H_ */
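cq_desc_dec() is the primitive the fnic completion paths are built on: ownership of a ring entry is encoded in the color bit, which the hardware writes last (hence the rmb()), so a consumer can detect new completions without reading any hardware index register. A hedged sketch of a typical service loop (the real fnic service routines are added in vnic_cq.h and vnic_cq_copy.h later in this patch):

/* 'ring', 'to_clean' and 'last_color' stand in for the driver's per-queue
 * state; an entry whose color differs from last_color is newly completed. */
static void example_cq_drain(struct cq_desc *ring, unsigned int desc_count,
			     unsigned int *to_clean, u8 *last_color)
{
	u8 type, color;
	u16 q_number, completed_index;

	for (;;) {
		cq_desc_dec(&ring[*to_clean], &type, &color,
			    &q_number, &completed_index);
		if (color == *last_color)
			break;		/* hardware has not written this entry yet */

		/* ... dispatch completed_index to the owning WQ/RQ here ... */

		if (++(*to_clean) == desc_count) {
			*to_clean = 0;
			*last_color = !*last_color;	/* color flips on each wrap */
		}
	}
}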
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 00000000000..a9fa26f82dd
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_ENET_DESC_H_
19#define _CQ_ENET_DESC_H_
20
21#include "cq_desc.h"
22
23/* Ethernet completion queue descriptor: 16B */
24struct cq_enet_wq_desc {
25 __le16 completed_index;
26 __le16 q_number;
27 u8 reserved[11];
28 u8 type_color;
29};
30
31static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
32 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
33{
34 cq_desc_dec((struct cq_desc *)desc, type,
35 color, q_number, completed_index);
36}
37
38/* Completion queue descriptor: Ethernet receive queue, 16B */
39struct cq_enet_rq_desc {
40 __le16 completed_index_flags;
41 __le16 q_number_rss_type_flags;
42 __le32 rss_hash;
43 __le16 bytes_written_flags;
44 __le16 vlan;
45 __le16 checksum_fcoe;
46 u8 flags;
47 u8 type_color;
48};
49
50#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
51#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
52#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
53#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
54
55#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
56#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
57 ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
58#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
59#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
60#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
61#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
62#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
63#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
64#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
65
66#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
67
68#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
69#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
70 ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
71#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
72#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
73
74#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
75#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
76 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
77#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
78#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
79 ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
80#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
81
82#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
83#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
84#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
85#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
86#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
87#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
88#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
89#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
90#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
91#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
92
93static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
94 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
95 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
96 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
97 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
98 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
99 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
100 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
101{
102 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
103 u16 q_number_rss_type_flags =
104 le16_to_cpu(desc->q_number_rss_type_flags);
105 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
106
107 cq_desc_dec((struct cq_desc *)desc, type,
108 color, q_number, completed_index);
109
110 *ingress_port = (completed_index_flags &
111 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
112 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
113 1 : 0;
114 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
115 1 : 0;
116 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
117 1 : 0;
118
119 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
120 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
121 *csum_not_calc = (q_number_rss_type_flags &
122 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
123
124 *rss_hash = le32_to_cpu(desc->rss_hash);
125
126 *bytes_written = bytes_written_flags &
127 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
128 *packet_error = (bytes_written_flags &
129 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
130 *vlan_stripped = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
132
133 *vlan = le16_to_cpu(desc->vlan);
134
135 if (*fcoe) {
136 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
137 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
138 *fcoe_fc_crc_ok = (desc->flags &
139 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
140 *fcoe_enc_error = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
142 *fcoe_eof = (u8)((desc->checksum_fcoe >>
143 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
144 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
145 *checksum = 0;
146 } else {
147 *fcoe_sof = 0;
148 *fcoe_fc_crc_ok = 0;
149 *fcoe_enc_error = 0;
150 *fcoe_eof = 0;
151 *checksum = le16_to_cpu(desc->checksum_fcoe);
152 }
153
154 *tcp_udp_csum_ok =
155 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
156 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
157 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
158 *ipv4_csum_ok =
159 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
160 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
161 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
162 *ipv4_fragment =
163 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
164 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
165}
166
167#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 00000000000..501660cfe22
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_EXCH_DESC_H_
19#define _CQ_EXCH_DESC_H_
20
21#include "cq_desc.h"
22
23/* Exchange completion queue descriptor: 16B */
24struct cq_exch_wq_desc {
25 u16 completed_index;
26 u16 q_number;
27 u16 exchange_id;
28 u8 tmpl;
29 u8 reserved0;
30 u32 reserved1;
31 u8 exch_status;
32 u8 reserved2[2];
33 u8 type_color;
34};
35
36#define CQ_EXCH_WQ_STATUS_BITS 2
37#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
38
39enum cq_exch_status_types {
40 CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
41 CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
42 CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
43 CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
44};
45
46static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
47 u8 *type,
48 u8 *color,
49 u16 *q_number,
50 u16 *completed_index,
51 u8 *exch_status)
52{
53 cq_desc_dec((struct cq_desc *)desc_ptr, type,
54 color, q_number, completed_index);
55 *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
56}
57
58struct cq_fcp_rq_desc {
59 u16 completed_index_eop_sop_prt;
60 u16 q_number;
61 u16 exchange_id;
62 u16 tmpl;
63 u16 bytes_written;
64 u16 vlan;
65 u8 sof;
66 u8 eof;
67 u8 fcs_fer_fck;
68 u8 type_color;
69};
70
71#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
72#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
73#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
74#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
75#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
76#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
77#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
78#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
79#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
80#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
81#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
82#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
83#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
84#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
85
86static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
87 u8 *type,
88 u8 *color,
89 u16 *q_number,
90 u16 *completed_index,
91 u8 *eop,
92 u8 *sop,
93 u8 *fck,
94 u16 *exchange_id,
95 u16 *tmpl,
96 u32 *bytes_written,
97 u8 *sof,
98 u8 *eof,
99 u8 *ingress_port,
100 u8 *packet_err,
101 u8 *fcoe_err,
102 u8 *fcs_ok,
103 u8 *vlan_stripped,
104 u16 *vlan)
105{
106 cq_desc_dec((struct cq_desc *)desc_ptr, type,
107 color, q_number, completed_index);
108 *eop = (desc_ptr->completed_index_eop_sop_prt &
109 CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
110 *sop = (desc_ptr->completed_index_eop_sop_prt &
111 CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
112 *ingress_port =
113 (desc_ptr->completed_index_eop_sop_prt &
114 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
115 *exchange_id = desc_ptr->exchange_id;
116 *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
117 *bytes_written =
118 desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
119 *packet_err =
120 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
121 CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
122 *vlan_stripped =
123 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
124 CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
125 *vlan = desc_ptr->vlan;
126 *sof = desc_ptr->sof;
127 *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
128 *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
129 CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
130 *eof = desc_ptr->eof;
131 *fcs_ok =
132 (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
133 CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
134}
135
136struct cq_sgl_desc {
137 u16 exchange_id;
138 u16 q_number;
139 u32 active_burst_offset;
140 u32 tot_data_bytes;
141 u16 tmpl;
142 u8 sgl_err;
143 u8 type_color;
144};
145
146enum cq_sgl_err_types {
147 CQ_SGL_ERR_NO_ERROR = 0,
148 CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
149 CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
150 CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
151 CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
152 CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
153 CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
154 CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
155 CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
156 CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
157};
158
159#define CQ_SGL_SGL_ERR_MASK 0x1f
160#define CQ_SGL_TMPL_MASK 0x1f
161
162static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
163 u8 *type,
164 u8 *color,
165 u16 *q_number,
166 u16 *exchange_id,
167 u32 *active_burst_offset,
168 u32 *tot_data_bytes,
169 u16 *tmpl,
170 u8 *sgl_err)
171{
172 /* Cheat a little by assuming exchange_id is the same as completed
173 index */
174 cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
175 exchange_id);
176 *active_burst_offset = desc_ptr->active_burst_offset;
177 *tot_data_bytes = desc_ptr->tot_data_bytes;
178 *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
179 *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
180}
181
182#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 00000000000..12d770d885c
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FCPIO_H_
19#define _FCPIO_H_
20
21#include <linux/if_ether.h>
22
23/*
24 * This header file includes all of the data structures used for
25 * communication by the host driver to the fcp firmware.
26 */
27
28/*
29 * Exchange and sequence id space allocated to the host driver
30 */
31#define FCPIO_HOST_EXCH_RANGE_START 0x1000
32#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
33#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
34#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
35
36/*
37 * Command entry type
38 */
39enum fcpio_type {
40 /*
41 * Initiator request types
42 */
43 FCPIO_ICMND_16 = 0x1,
44 FCPIO_ICMND_32,
45 FCPIO_ICMND_CMPL,
46 FCPIO_ITMF,
47 FCPIO_ITMF_CMPL,
48
49 /*
50 * Target request types
51 */
52 FCPIO_TCMND_16 = 0x11,
53 FCPIO_TCMND_32,
54 FCPIO_TDATA,
55 FCPIO_TXRDY,
56 FCPIO_TRSP,
57 FCPIO_TDRSP_CMPL,
58 FCPIO_TTMF,
59 FCPIO_TTMF_ACK,
60 FCPIO_TABORT,
61 FCPIO_TABORT_CMPL,
62
63 /*
64 * Misc request types
65 */
66 FCPIO_ACK = 0x20,
67 FCPIO_RESET,
68 FCPIO_RESET_CMPL,
69 FCPIO_FLOGI_REG,
70 FCPIO_FLOGI_REG_CMPL,
71 FCPIO_ECHO,
72 FCPIO_ECHO_CMPL,
73 FCPIO_LUNMAP_CHNG,
74 FCPIO_LUNMAP_REQ,
75 FCPIO_LUNMAP_REQ_CMPL,
76 FCPIO_FLOGI_FIP_REG,
77 FCPIO_FLOGI_FIP_REG_CMPL,
78};
79
80/*
81 * Header status codes from the firmware
82 */
83enum fcpio_status {
84 FCPIO_SUCCESS = 0, /* request was successful */
85
86 /*
87 * If a request to the firmware is rejected, the original request
88 * header will be returned with the status set to one of the following:
89 */
90 FCPIO_INVALID_HEADER, /* header contains invalid data */
91 FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
92 FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
93 FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
94 FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
95
96 /*
97 * Once a request is processed, the firmware will usually return
98 * a cmpl message type. In cases where errors occurred,
99 * the header status field will be filled in with one of the following:
100 */
101 FCPIO_ABORTED = 0x41, /* request was aborted */
102 FCPIO_TIMEOUT, /* request was timed out */
103 FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
104 FCPIO_MSS_INVALID, /* request was aborted due to mss error */
105 FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
106 FCPIO_FW_ERR, /* request was terminated due to fw error */
107 FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
108 FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
109 FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
110 FCPIO_CMND_REJECTED, /* request was invalid and rejected */
111 FCPIO_NO_PATH_AVAIL, /* no path to the LUN was available */
112 FCPIO_PATH_FAILED, /* i/o sent to current path failed */
113 FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
114};
115
116/*
117 * The header command tag. All host requests will use the "tag" field
118 * to mark commands with a unique tag. When the firmware responds to
119 * a host request, it will copy the tag field into the response.
120 *
121 * The only firmware requests that will use the rx_id/ox_id fields instead
122 * of the tag field will be the target command and target task management
123 * requests. These two requests do not have corresponding host requests
124 * since they come directly from the FC initiator on the network.
125 */
126struct fcpio_tag {
127 union {
128 u32 req_id;
129 struct {
130 u16 rx_id;
131 u16 ox_id;
132 } ex_id;
133 } u;
134};
135
136static inline void
137fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
138{
139 tag->u.req_id = id;
140}
141
142static inline void
143fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
144{
145 *id = tag->u.req_id;
146}
147
148static inline void
149fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
150{
151 tag->u.ex_id.rx_id = rx_id;
152 tag->u.ex_id.ox_id = ox_id;
153}
154
155static inline void
156fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
157{
158 *rx_id = tag->u.ex_id.rx_id;
159 *ox_id = tag->u.ex_id.ox_id;
160}
161
162/*
163 * The header for an fcpio request, whether from the firmware or from the
164 * host driver
165 */
166struct fcpio_header {
167 u8 type; /* enum fcpio_type */
168 u8 status; /* header status entry */
169 u16 _resvd; /* reserved */
170 struct fcpio_tag tag; /* header tag */
171};
172
173static inline void
174fcpio_header_enc(struct fcpio_header *hdr,
175 u8 type, u8 status,
176 struct fcpio_tag tag)
177{
178 hdr->type = type;
179 hdr->status = status;
180 hdr->_resvd = 0;
181 hdr->tag = tag;
182}
183
184static inline void
185fcpio_header_dec(struct fcpio_header *hdr,
186 u8 *type, u8 *status,
187 struct fcpio_tag *tag)
188{
189 *type = hdr->type;
190 *status = hdr->status;
191 *tag = hdr->tag;
192}
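
/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * could stamp an outgoing request header with a request-id tag and later
 * recover that id from a response header using the helpers above.  The
 * function and variable names here are hypothetical.
 */
static inline void fcpio_tag_roundtrip_example(struct fcpio_header *req_hdr,
					       struct fcpio_header *resp_hdr)
{
	struct fcpio_tag tag;
	u32 io_tag = 42;	/* driver-chosen request id */
	u32 completed_tag;
	u8 type, status;

	/* encode: build the tag, then stamp the outgoing header */
	fcpio_tag_id_enc(&tag, io_tag);
	fcpio_header_enc(req_hdr, FCPIO_ICMND_16, 0, tag);

	/* decode: pull type/status/tag back out of a response header */
	fcpio_header_dec(resp_hdr, &type, &status, &tag);
	fcpio_tag_id_dec(&tag, &completed_tag);	/* completed_tag == io_tag */
}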
193
194#define CDB_16 16
195#define CDB_32 32
196#define LUN_ADDRESS 8
197
198/*
199 * fcpio_icmnd_16: host -> firmware request
200 *
201 * used for sending out an initiator SCSI 16-byte command
202 */
203struct fcpio_icmnd_16 {
204 u32 lunmap_id; /* index into lunmap table */
205 u8 special_req_flags; /* special exchange request flags */
206 u8 _resvd0[3]; /* reserved */
207 u32 sgl_cnt; /* scatter-gather list count */
208 u32 sense_len; /* sense buffer length */
209 u64 sgl_addr; /* scatter-gather list addr */
210 u64 sense_addr; /* sense buffer address */
211 u8 crn; /* SCSI Command Reference No. */
212 u8 pri_ta; /* SCSI Priority and Task attribute */
213 u8 _resvd1; /* reserved: should be 0 */
214 u8 flags; /* command flags */
215 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
216 u32 data_len; /* length of data expected */
217 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
218 u8 _resvd2; /* reserved */
219 u8 d_id[3]; /* FC vNIC only: Target D_ID */
220 u16 mss; /* FC vNIC only: max burst */
221 u16 _resvd3; /* reserved */
222 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
223 u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
224};
225
226/*
227 * Special request flags
228 */
229#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
230
231/*
232 * Priority/Task Attribute settings
233 */
234#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
235#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
236#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
237#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
238#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
239
240/*
241 * Command flags
242 */
243#define FCPIO_ICMND_RDDATA 0x02 /* read data */
244#define FCPIO_ICMND_WRDATA 0x01 /* write data */
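
/*
 * Illustrative sketch, not part of the upstream file: filling in a
 * 16-byte initiator command using the layout and flag definitions
 * above.  Names and values are hypothetical, and memset/memcpy are
 * assumed to be available (linux/string.h).
 */
static inline void fcpio_icmnd_16_fill_example(struct fcpio_icmnd_16 *icmnd,
					       u64 sgl_pa, u32 sgl_cnt,
					       u64 sense_pa, u32 sense_len,
					       const u8 *cdb, u32 data_len)
{
	memset(icmnd, 0, sizeof(*icmnd));
	icmnd->lunmap_id = 0;			/* first lunmap entry */
	icmnd->sgl_cnt = sgl_cnt;
	icmnd->sgl_addr = sgl_pa;
	icmnd->sense_len = sense_len;
	icmnd->sense_addr = sense_pa;
	icmnd->pri_ta = FCPIO_ICMND_PTA_SIMPLE;	/* simple task attribute */
	icmnd->flags = FCPIO_ICMND_RDDATA;	/* data-in command */
	memcpy(icmnd->scsi_cdb, cdb, CDB_16);
	icmnd->data_len = data_len;
}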
245
246/*
247 * fcpio_icmnd_32: host -> firmware request
248 *
249 * used for sending out an initiator SCSI 32-byte command
250 */
251struct fcpio_icmnd_32 {
252 u32 lunmap_id; /* index into lunmap table */
253 u8 special_req_flags; /* special exchange request flags */
254 u8 _resvd0[3]; /* reserved */
255 u32 sgl_cnt; /* scatter-gather list count */
256 u32 sense_len; /* sense buffer length */
257 u64 sgl_addr; /* scatter-gather list addr */
258 u64 sense_addr; /* sense buffer address */
259 u8 crn; /* SCSI Command Reference No. */
260 u8 pri_ta; /* SCSI Priority and Task attribute */
261 u8 _resvd1; /* reserved: should be 0 */
262 u8 flags; /* command flags */
263 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
264 u32 data_len; /* length of data expected */
265 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
266 u8 _resvd2; /* reserved */
267 u8 d_id[3]; /* FC vNIC only: Target D_ID */
268 u16 mss; /* FC vNIC only: max burst */
269 u16 _resvd3; /* reserved */
270 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
271 u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
272};
273
274/*
275 * fcpio_itmf: host -> firmware request
276 *
277 * used for requesting the firmware to abort a request and/or send out
278 * a task management function
279 *
280 * The t_tag field is only needed when the request type is ABT_TASK.
281 */
282struct fcpio_itmf {
283 u32 lunmap_id; /* index into lunmap table */
284 u32 tm_req; /* SCSI Task Management request */
285 u32 t_tag; /* header tag of fcpio to be aborted */
286 u32 _resvd; /* reserved */
287 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
288 u8 _resvd1; /* reserved */
289 u8 d_id[3]; /* FC vNIC only: Target D_ID */
290 u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
291 u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
292};
293
294/*
295 * Task Management request
296 */
297enum fcpio_itmf_tm_req_type {
298 FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
299 FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
300 FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
301 FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
302 FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
303 FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
304};
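
/*
 * Illustrative sketch, not part of the upstream file: building an
 * abort-task management request.  As noted above, t_tag carries the
 * header tag of the fcpio request being aborted and is only meaningful
 * for the ABT_TASK request types.  Names are hypothetical.
 */
static inline void fcpio_itmf_abort_example(struct fcpio_itmf *itmf,
					    u32 aborted_tag)
{
	memset(itmf, 0, sizeof(*itmf));		/* assumes linux/string.h */
	itmf->lunmap_id = 0;
	itmf->tm_req = FCPIO_ITMF_ABT_TASK;	/* abort task and issue abts */
	itmf->t_tag = aborted_tag;		/* tag of the fcpio to abort */
}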
305
306/*
307 * fcpio_tdata: host -> firmware request
308 *
309 * used for requesting the firmware to send out a read data transfer for a
310 * target command
311 */
312struct fcpio_tdata {
313 u16 rx_id; /* FC rx_id of target command */
314 u16 flags; /* command flags */
315 u32 rel_offset; /* data sequence relative offset */
316 u32 sgl_cnt; /* scatter-gather list count */
317 u32 data_len; /* length of data expected to send */
318 u64 sgl_addr; /* scatter-gather list address */
319};
320
321/*
322 * Command flags
323 */
324#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
325
326/*
327 * fcpio_txrdy: host -> firmware request
328 *
329 * used for requesting the firmware to send out a write data transfer for a
330 * target command
331 */
332struct fcpio_txrdy {
333 u16 rx_id; /* FC rx_id of target command */
334 u16 _resvd0; /* reserved */
335 u32 rel_offset; /* data sequence relative offset */
336 u32 sgl_cnt; /* scatter-gather list count */
337 u32 data_len; /* length of data expected to send */
338 u64 sgl_addr; /* scatter-gather list address */
339};
340
341/*
342 * fcpio_trsp: host -> firmware request
343 *
344 * used for requesting the firmware to send out a response for a target
345 * command
346 */
347struct fcpio_trsp {
348 u16 rx_id; /* FC rx_id of target command */
349 u16 _resvd0; /* reserved */
350 u32 sense_len; /* sense data buffer length */
351 u64 sense_addr; /* sense data buffer address */
352 u16 _resvd1; /* reserved */
353 u8 flags; /* response request flags */
354 u8 scsi_status; /* SCSI status */
355 u32 residual; /* SCSI data residual value of I/O */
356};
357
358/*
359 * response request flags
360 */
361#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
362#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
363
364/*
365 * fcpio_ttmf_ack: host -> firmware response
366 *
367 * used by the host to indicate to the firmware it has received and processed
368 * the target tmf request
369 */
370struct fcpio_ttmf_ack {
371 u16 rx_id; /* FC rx_id of target command */
372 u16 _resvd0; /* reserved */
373 u32 tmf_status; /* SCSI task management status */
374};
375
376/*
377 * fcpio_tabort: host -> firmware request
378 *
379 * used by the host to request the firmware to abort a target request that was
380 * received by the firmware
381 */
382struct fcpio_tabort {
383 u16 rx_id; /* rx_id of the target request */
384};
385
386/*
387 * fcpio_reset: host -> firmware request
388 *
389 * used by the host to signal a reset of the driver to the firmware
390 * and to request firmware to clean up all outstanding I/O
391 */
392struct fcpio_reset {
393 u32 _resvd;
394};
395
396enum fcpio_flogi_reg_format_type {
397 FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
398 FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
399};
400
401/*
402 * fcpio_flogi_reg: host -> firmware request
403 *
404 * fc vnic only
405 * used by the host to notify the firmware of the lif's s_id
406 * and destination mac address format
407 */
408struct fcpio_flogi_reg {
409 u8 format;
410 u8 s_id[3]; /* FC vNIC only: Source S_ID */
411 u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
412 u16 _resvd;
413 u32 r_a_tov; /* R_A_TOV in msec */
414 u32 e_d_tov; /* E_D_TOV in msec */
415};
416
417/*
418 * fcpio_echo: host -> firmware request
419 *
420 * sends a heartbeat echo request to the firmware
421 */
422struct fcpio_echo {
423 u32 _resvd;
424};
425
426/*
427 * fcpio_lunmap_req: host -> firmware request
428 *
429 * scsi vnic only
430 * sends a request to retrieve the lunmap table for scsi vnics
431 */
432struct fcpio_lunmap_req {
433 u64 addr; /* address of the buffer */
434 u32 len; /* len of the buffer */
435};
436
437/*
438 * fcpio_flogi_fip_reg: host -> firmware request
439 *
440 * fc vnic only
441 * used by the host to notify the firmware of the lif's s_id
442 * and destination mac address format
443 */
444struct fcpio_flogi_fip_reg {
445 u8 _resvd0;
446 u8 s_id[3]; /* FC vNIC only: Source S_ID */
447 u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
448 u16 _resvd1;
449 u32 r_a_tov; /* R_A_TOV in msec */
450 u32 e_d_tov; /* E_D_TOV in msec */
451 u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
452 u16 _resvd2;
453};
454
455/*
456 * Basic structure for all fcpio structures that are sent from the host to the
457 * firmware. They are 128 bytes per structure.
458 */
459#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
460
461struct fcpio_host_req {
462 struct fcpio_header hdr;
463
464 union {
465 /*
466 * Defines space needed for request
467 */
468 u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
469
470 /*
471 * Initiator host requests
472 */
473 struct fcpio_icmnd_16 icmnd_16;
474 struct fcpio_icmnd_32 icmnd_32;
475 struct fcpio_itmf itmf;
476
477 /*
478 * Target host requests
479 */
480 struct fcpio_tdata tdata;
481 struct fcpio_txrdy txrdy;
482 struct fcpio_trsp trsp;
483 struct fcpio_ttmf_ack ttmf_ack;
484 struct fcpio_tabort tabort;
485
486 /*
487 * Misc requests
488 */
489 struct fcpio_reset reset;
490 struct fcpio_flogi_reg flogi_reg;
491 struct fcpio_echo echo;
492 struct fcpio_lunmap_req lunmap_req;
493 struct fcpio_flogi_fip_reg flogi_fip_reg;
494 } u;
495};
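
/*
 * Illustrative sketch, not part of the upstream file: the buf member of
 * the union pads every host request out to FCPIO_HOST_REQ_LEN (128)
 * bytes.  A compile-time check of that invariant could look like the
 * following, assuming BUILD_BUG_ON (linux/bug.h) is available, the
 * helper is invoked from an init path, and no such check already
 * exists elsewhere.
 */
static inline void fcpio_host_req_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct fcpio_host_req) != FCPIO_HOST_REQ_LEN);
}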
496
497/*
498 * fcpio_icmnd_cmpl: firmware -> host response
499 *
500 * used for sending the host a response to an initiator command
501 */
502struct fcpio_icmnd_cmpl {
503 u8 _resvd0[6]; /* reserved */
504 u8 flags; /* response flags */
505 u8 scsi_status; /* SCSI status */
506 u32 residual; /* SCSI data residual length */
507 u32 sense_len; /* SCSI sense length */
508};
509
510/*
511 * response flags
512 */
513#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
514#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
515
516/*
517 * fcpio_itmf_cmpl: firmware -> host response
518 *
519 * used for sending the host a response for a itmf request
520 */
521struct fcpio_itmf_cmpl {
522 u32 _resvd; /* reserved */
523};
524
525/*
526 * fcpio_tcmnd_16: firmware -> host request
527 *
528 * used by the firmware to notify the host of an incoming target SCSI 16-Byte
529 * request
530 */
531struct fcpio_tcmnd_16 {
532 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
533 u8 crn; /* SCSI Command Reference No. */
534 u8 pri_ta; /* SCSI Priority and Task attribute */
535 u8 _resvd2; /* reserved: should be 0 */
536 u8 flags; /* command flags */
537 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
538 u32 data_len; /* length of data expected */
539 u8 _resvd1; /* reserved */
540 u8 s_id[3]; /* FC vNIC only: Source S_ID */
541};
542
543/*
544 * Priority/Task Attribute settings
545 */
546#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
547#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
548#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
549#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
550#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
551
552/*
553 * Command flags
554 */
555#define FCPIO_TCMND_RDDATA 0x02 /* read data */
556#define FCPIO_TCMND_WRDATA 0x01 /* write data */
557
558/*
559 * fcpio_tcmnd_32: firmware -> host request
560 *
561 * used by the firmware to notify the host of an incoming target SCSI 32-Byte
562 * request
563 */
564struct fcpio_tcmnd_32 {
565 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
566 u8 crn; /* SCSI Command Reference No. */
567 u8 pri_ta; /* SCSI Priority and Task attribute */
568 u8 _resvd2; /* reserved: should be 0 */
569 u8 flags; /* command flags */
570 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
571 u32 data_len; /* length of data expected */
572 u8 _resvd0; /* reserved */
573 u8 s_id[3]; /* FC vNIC only: Source S_ID */
574};
575
576/*
577 * fcpio_tdrsp_cmpl: firmware -> host response
578 *
579 * used by the firmware to notify the host of a response to a host target
580 * command
581 */
582struct fcpio_tdrsp_cmpl {
583 u16 rx_id; /* rx_id of the target request */
584 u16 _resvd0; /* reserved */
585};
586
587/*
588 * fcpio_ttmf: firmware -> host request
589 *
590 * used by the firmware to notify the host of an incoming task management
591 * function request
592 */
593struct fcpio_ttmf {
594 u8 _resvd0; /* reserved */
595 u8 s_id[3]; /* FC vNIC only: Source S_ID */
596 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
597 u8 crn; /* SCSI Command Reference No. */
598 u8 _resvd2[3]; /* reserved */
599 u32 tmf_type; /* task management request type */
600};
601
602/*
603 * Task Management request
604 */
605#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
606#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
607#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
608#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
609#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
610
611/*
612 * fcpio_tabort_cmpl: firmware -> host response
613 *
614 * used by the firmware to respond to a host's tabort request
615 */
616struct fcpio_tabort_cmpl {
617 u16 rx_id; /* rx_id of the target request */
618 u16 _resvd0; /* reserved */
619};
620
621/*
622 * fcpio_ack: firmware -> host response
623 *
624 * used by firmware to notify the host of the last work request received
625 */
626struct fcpio_ack {
627 u16 request_out; /* last host entry received */
628 u16 _resvd;
629};
630
631/*
632 * fcpio_reset_cmpl: firmware -> host response
633 *
634 * used by firmware to respond to the host's reset request
635 */
636struct fcpio_reset_cmpl {
637 u16 vnic_id;
638};
639
640/*
641 * fcpio_flogi_reg_cmpl: firmware -> host response
642 *
643 * fc vnic only
644 * response to the fcpio_flogi_reg request
645 */
646struct fcpio_flogi_reg_cmpl {
647 u32 _resvd;
648};
649
650/*
651 * fcpio_echo_cmpl: firmware -> host response
652 *
653 * response to the fcpio_echo request
654 */
655struct fcpio_echo_cmpl {
656 u32 _resvd;
657};
658
659/*
660 * fcpio_lunmap_chng: firmware -> host notification
661 *
662 * scsi vnic only
663 * notifies the host that the lunmap tables have changed
664 */
665struct fcpio_lunmap_chng {
666 u32 _resvd;
667};
668
669/*
670 * fcpio_lunmap_req_cmpl: firmware -> host response
671 *
672 * scsi vnic only
673 * response for lunmap table request from the host
674 */
675struct fcpio_lunmap_req_cmpl {
676 u32 _resvd;
677};
678
679/*
680 * Basic structure for all fcpio structures that are sent from the firmware to
681 * the host. They are 64 bytes per structure.
682 */
683#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
684struct fcpio_fw_req {
685 struct fcpio_header hdr;
686
687 union {
688 /*
689 * Defines space needed for request
690 */
691 u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
692
693 /*
694 * Initiator firmware responses
695 */
696 struct fcpio_icmnd_cmpl icmnd_cmpl;
697 struct fcpio_itmf_cmpl itmf_cmpl;
698
699 /*
700 * Target firmware new requests
701 */
702 struct fcpio_tcmnd_16 tcmnd_16;
703 struct fcpio_tcmnd_32 tcmnd_32;
704
705 /*
706 * Target firmware responses
707 */
708 struct fcpio_tdrsp_cmpl tdrsp_cmpl;
709 struct fcpio_ttmf ttmf;
710 struct fcpio_tabort_cmpl tabort_cmpl;
711
712 /*
713 * Firmware response to work received
714 */
715 struct fcpio_ack ack;
716
717 /*
718 * Misc requests
719 */
720 struct fcpio_reset_cmpl reset_cmpl;
721 struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
722 struct fcpio_echo_cmpl echo_cmpl;
723 struct fcpio_lunmap_chng lunmap_chng;
724 struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
725 } u;
726};
727
728/*
729 * Access routines to encode and decode the color bit, which is the most
730 * significant bit of the last byte of the structure
731 */
732static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
733{
734 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
735
736 if (color)
737 *c |= 0x80;
738 else
739 *c &= ~0x80;
740}
741
742static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
743{
744 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
745
746 *color = *c >> 7;
747
748 /*
749 * Make sure color bit is read from desc *before* other fields
750 * are read from desc. Hardware guarantees color bit is last
751 * bit (byte) written. Adding the rmb() prevents the compiler
752 * and/or CPU from reordering the reads which would potentially
753 * result in reading stale values.
754 */
755
756 rmb();
757
758}
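
/*
 * Illustrative sketch, not part of the upstream file: how a completion
 * queue consumer might use the color bit to detect a newly posted
 * firmware descriptor.  The ring/index/expected-color bookkeeping is
 * hypothetical; the real handling lives in the vnic_cq service code.
 */
static inline int fcpio_fw_req_ready_example(struct fcpio_fw_req *ring,
					     unsigned int index,
					     u8 expected_color)
{
	u8 color;

	fcpio_color_dec(&ring[index], &color);

	/*
	 * The descriptor is valid only when its color matches the color the
	 * consumer expects for this pass over the ring; the producer flips
	 * the color each time it wraps around.
	 */
	return color == expected_color;
}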
759
760/*
761 * Lunmap table entry for scsi vnics
762 */
763#define FCPIO_LUNMAP_TABLE_SIZE 256
764#define FCPIO_FLAGS_LUNMAP_VALID 0x80
765#define FCPIO_FLAGS_BOOT 0x01
766struct fcpio_lunmap_entry {
767 u8 bus;
768 u8 target;
769 u8 lun;
770 u8 path_cnt;
771 u16 flags;
772 u16 update_cnt;
773};
774
775struct fcpio_lunmap_tbl {
776 u32 update_cnt;
777 struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
778};
779
780#endif /* _FCPIO_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 00000000000..e4c0a3d7d87
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_H_
19#define _FNIC_H_
20
21#include <linux/interrupt.h>
22#include <linux/netdevice.h>
23#include <linux/workqueue.h>
24#include <scsi/libfc.h>
25#include "fnic_io.h"
26#include "fnic_res.h"
27#include "vnic_dev.h"
28#include "vnic_wq.h"
29#include "vnic_rq.h"
30#include "vnic_cq.h"
31#include "vnic_wq_copy.h"
32#include "vnic_intr.h"
33#include "vnic_stats.h"
34#include "vnic_scsi.h"
35
36#define DRV_NAME "fnic"
37#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
38#define DRV_VERSION "1.0.0.1121"
39#define PFX DRV_NAME ": "
40#define DFX DRV_NAME "%d: "
41
42#define DESC_CLEAN_LOW_WATERMARK 8
43#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
44#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
45#define FNIC_DFLT_QUEUE_DEPTH 32
46#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
47
48/*
49 * Tag bits used for special requests.
50 */
51#define BIT(nr) (1UL << (nr))
52#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
53#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
54#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
55#define FNIC_NO_TAG -1
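
/*
 * Illustrative sketch, not part of the upstream file: the special tag
 * bits above are OR'd into a base SCSI command tag for abort/device
 * reset requests, and FNIC_TAG_MASK recovers the base tag on lookup.
 * The helper name is hypothetical.
 */
static inline u32 fnic_make_abort_tag_example(u32 scsi_tag)
{
	return scsi_tag | FNIC_TAG_ABORT;	/* lookup uses tag & FNIC_TAG_MASK */
}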
56
57/*
58 * Usage of the scsi_cmnd scratchpad.
59 * These fields are locked by the hashed io_req_lock.
60 */
61#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
62#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
63#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
64#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
65#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
66
67#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
68
69#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
70#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
71#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
72#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
73
74#define FNIC_MAX_FCP_TARGET 256
75
76extern unsigned int fnic_log_level;
77
78#define FNIC_MAIN_LOGGING 0x01
79#define FNIC_FCS_LOGGING 0x02
80#define FNIC_SCSI_LOGGING 0x04
81#define FNIC_ISR_LOGGING 0x08
82
83#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
84do { \
85 if (unlikely(fnic_log_level & LEVEL)) \
86 do { \
87 CMD; \
88 } while (0); \
89} while (0)
90
91#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
92 FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
93 shost_printk(kern_level, host, fmt, ##args);)
94
95#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
96 FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
97 shost_printk(kern_level, host, fmt, ##args);)
98
99#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
100 FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
101 shost_printk(kern_level, host, fmt, ##args);)
102
103#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
104 FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
105 shost_printk(kern_level, host, fmt, ##args);)
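
/*
 * Illustrative usage, not part of the upstream file: the wrappers above
 * only emit a message when the matching bit is set in the global
 * fnic_log_level, e.g.:
 *
 *	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 *		      "tag %d: io completed\n", tag);
 */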
106
107extern const char *fnic_state_str[];
108
109enum fnic_intx_intr_index {
110 FNIC_INTX_WQ_RQ_COPYWQ,
111 FNIC_INTX_ERR,
112 FNIC_INTX_NOTIFY,
113 FNIC_INTX_INTR_MAX,
114};
115
116enum fnic_msix_intr_index {
117 FNIC_MSIX_RQ,
118 FNIC_MSIX_WQ,
119 FNIC_MSIX_WQ_COPY,
120 FNIC_MSIX_ERR_NOTIFY,
121 FNIC_MSIX_INTR_MAX,
122};
123
124struct fnic_msix_entry {
125 int requested;
126 char devname[IFNAMSIZ];
127 irqreturn_t (*isr)(int, void *);
128 void *devid;
129};
130
131enum fnic_state {
132 FNIC_IN_FC_MODE = 0,
133 FNIC_IN_FC_TRANS_ETH_MODE,
134 FNIC_IN_ETH_MODE,
135 FNIC_IN_ETH_TRANS_FC_MODE,
136};
137
138#define FNIC_WQ_COPY_MAX 1
139#define FNIC_WQ_MAX 1
140#define FNIC_RQ_MAX 1
141#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
142
143struct mempool;
144
145/* Per-instance private data structure */
146struct fnic {
147 struct fc_lport *lport;
148 struct vnic_dev_bar bar0;
149
150 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
151 struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
152
153 struct vnic_stats *stats;
154 unsigned long stats_time; /* time of stats update */
155 struct vnic_nic_cfg *nic_cfg;
156 char name[IFNAMSIZ];
157 struct timer_list notify_timer; /* used for MSI interrupts */
158
159 unsigned int err_intr_offset;
160 unsigned int link_intr_offset;
161
162 unsigned int wq_count;
163 unsigned int cq_count;
164
165 u32 fcoui_mode:1; /* use fcoui address*/
166 u32 vlan_hw_insert:1; /* let hw insert the tag */
167 u32 in_remove:1; /* fnic device in removal */
168 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
169
170 struct completion *remove_wait; /* device remove thread blocks */
171
172 struct fc_frame *flogi;
173 struct fc_frame *flogi_resp;
174 u16 flogi_oxid;
175 unsigned long s_id;
176 enum fnic_state state;
177 spinlock_t fnic_lock;
178
179 u16 vlan_id; /* VLAN tag including priority */
180 u8 mac_addr[ETH_ALEN];
181 u8 dest_addr[ETH_ALEN];
182 u8 data_src_addr[ETH_ALEN];
183 u64 fcp_input_bytes; /* internal statistic */
184 u64 fcp_output_bytes; /* internal statistic */
185 u32 link_down_cnt;
186 int link_status;
187
188 struct list_head list;
189 struct pci_dev *pdev;
190 struct vnic_fc_config config;
191 struct vnic_dev *vdev;
192 unsigned int raw_wq_count;
193 unsigned int wq_copy_count;
194 unsigned int rq_count;
195 int fw_ack_index[FNIC_WQ_COPY_MAX];
196 unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
197 unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
198 unsigned int intr_count;
199 u32 __iomem *legacy_pba;
200 struct fnic_host_tag *tags;
201 mempool_t *io_req_pool;
202 mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
203 spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
204
205 struct work_struct link_work;
206 struct work_struct frame_work;
207 struct sk_buff_head frame_queue;
208
209 /* copy work queue cache line section */
210 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
211 /* completion queue cache line section */
212 ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
213
214 spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
215
216 /* work queue cache line section */
217 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
218 spinlock_t wq_lock[FNIC_WQ_MAX];
219
220 /* receive queue cache line section */
221 ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
222
223 /* interrupt resource cache line section */
224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
225};
226
227extern struct workqueue_struct *fnic_event_queue;
228extern struct device_attribute *fnic_attrs[];
229
230void fnic_clear_intr_mode(struct fnic *fnic);
231int fnic_set_intr_mode(struct fnic *fnic);
232void fnic_free_intr(struct fnic *fnic);
233int fnic_request_intr(struct fnic *fnic);
234
235int fnic_send(struct fc_lport *, struct fc_frame *);
236void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
237void fnic_handle_frame(struct work_struct *work);
238void fnic_handle_link(struct work_struct *work);
239int fnic_rq_cmpl_handler(struct fnic *fnic, int);
240int fnic_alloc_rq_frame(struct vnic_rq *rq);
241void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
242int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
243
244int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
245int fnic_abort_cmd(struct scsi_cmnd *);
246int fnic_device_reset(struct scsi_cmnd *);
247int fnic_host_reset(struct scsi_cmnd *);
248int fnic_reset(struct Scsi_Host *);
249void fnic_scsi_cleanup(struct fc_lport *);
250void fnic_scsi_abort_io(struct fc_lport *);
251void fnic_empty_scsi_cleanup(struct fc_lport *);
252void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
253int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
254int fnic_wq_cmpl_handler(struct fnic *fnic, int);
255int fnic_flogi_reg_handler(struct fnic *fnic);
256void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
257 struct fcpio_host_req *desc);
258int fnic_fw_reset_handler(struct fnic *fnic);
259void fnic_terminate_rport_io(struct fc_rport *);
260const char *fnic_state_to_str(unsigned int state);
261
262void fnic_log_q_error(struct fnic *fnic);
263void fnic_handle_link_event(struct fnic *fnic);
264
265#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 00000000000..aea0c3becfd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/device.h>
20#include <scsi/scsi_host.h>
21#include "fnic.h"
22
23static ssize_t fnic_show_state(struct device *dev,
24 struct device_attribute *attr, char *buf)
25{
26 struct fc_lport *lp = shost_priv(class_to_shost(dev));
27 struct fnic *fnic = lport_priv(lp);
28
29 return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
30}
31
32static ssize_t fnic_show_drv_version(struct device *dev,
33 struct device_attribute *attr, char *buf)
34{
35 return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
36}
37
38static ssize_t fnic_show_link_state(struct device *dev,
39 struct device_attribute *attr, char *buf)
40{
41 struct fc_lport *lp = shost_priv(class_to_shost(dev));
42
43 return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
44 ? "Link Up" : "Link Down");
45}
46
47static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
48static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
49static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
50
51struct device_attribute *fnic_attrs[] = {
52 &dev_attr_fnic_state,
53 &dev_attr_drv_version,
54 &dev_attr_link_state,
55 NULL,
56};
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 00000000000..07e6eedb83c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,742 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/pci.h>
20#include <linux/skbuff.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/if_ether.h>
24#include <linux/if_vlan.h>
25#include <linux/workqueue.h>
26#include <scsi/fc/fc_els.h>
27#include <scsi/fc/fc_fcoe.h>
28#include <scsi/fc_frame.h>
29#include <scsi/libfc.h>
30#include "fnic_io.h"
31#include "fnic.h"
32#include "cq_enet_desc.h"
33#include "cq_exch_desc.h"
34
35struct workqueue_struct *fnic_event_queue;
36
37void fnic_handle_link(struct work_struct *work)
38{
39 struct fnic *fnic = container_of(work, struct fnic, link_work);
40 unsigned long flags;
41 int old_link_status;
42 u32 old_link_down_cnt;
43
44 spin_lock_irqsave(&fnic->fnic_lock, flags);
45
46 if (fnic->stop_rx_link_events) {
47 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
48 return;
49 }
50
51 old_link_down_cnt = fnic->link_down_cnt;
52 old_link_status = fnic->link_status;
53 fnic->link_status = vnic_dev_link_status(fnic->vdev);
54 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
55
56 if (old_link_status == fnic->link_status) {
57 if (!fnic->link_status)
58 /* DOWN -> DOWN */
59 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
60 else {
61 if (old_link_down_cnt != fnic->link_down_cnt) {
62 /* UP -> DOWN -> UP */
63 fnic->lport->host_stats.link_failure_count++;
64 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
66 "link down\n");
67 fc_linkdown(fnic->lport);
68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
69 "link up\n");
70 fc_linkup(fnic->lport);
71 } else
72 /* UP -> UP */
73 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
74 }
75 } else if (fnic->link_status) {
76 /* DOWN -> UP */
77 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
79 fc_linkup(fnic->lport);
80 } else {
81 /* UP -> DOWN */
82 fnic->lport->host_stats.link_failure_count++;
83 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
85 fc_linkdown(fnic->lport);
86 }
87
88}
89
90/*
91 * This function passes incoming fabric frames to libFC
92 */
93void fnic_handle_frame(struct work_struct *work)
94{
95 struct fnic *fnic = container_of(work, struct fnic, frame_work);
96 struct fc_lport *lp = fnic->lport;
97 unsigned long flags;
98 struct sk_buff *skb;
99 struct fc_frame *fp;
100
101 while ((skb = skb_dequeue(&fnic->frame_queue))) {
102
103 spin_lock_irqsave(&fnic->fnic_lock, flags);
104 if (fnic->stop_rx_link_events) {
105 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
106 dev_kfree_skb(skb);
107 return;
108 }
109 fp = (struct fc_frame *)skb;
110 /* if Flogi resp frame, register the address */
111 if (fr_flags(fp)) {
112 vnic_dev_add_addr(fnic->vdev,
113 fnic->data_src_addr);
114 fr_flags(fp) = 0;
115 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117
118 fc_exch_recv(lp, lp->emp, fp);
119 }
120
121}
122
123static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
124 u32 len, u8 sof, u8 eof)
125{
126 struct fc_frame *fp = (struct fc_frame *)skb;
127
128 skb_trim(skb, len);
129 fr_eof(fp) = eof;
130 fr_sof(fp) = sof;
131}
132
133
134static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
135{
136 struct fc_frame *fp;
137 struct ethhdr *eh;
138 struct vlan_ethhdr *vh;
139 struct fcoe_hdr *fcoe_hdr;
140 struct fcoe_crc_eof *ft;
141 u32 transport_len = 0;
142
143 eh = (struct ethhdr *)skb->data;
144 vh = (struct vlan_ethhdr *)skb->data;
145 if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
146 vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
147 skb_pull(skb, sizeof(struct vlan_ethhdr));
148 transport_len += sizeof(struct vlan_ethhdr);
149 } else if (eh->h_proto == htons(ETH_P_FCOE)) {
150 transport_len += sizeof(struct ethhdr);
151 skb_pull(skb, sizeof(struct ethhdr));
152 } else
153 return -1;
154
155 fcoe_hdr = (struct fcoe_hdr *)skb->data;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
157 return -1;
158
159 fp = (struct fc_frame *)skb;
160 fc_frame_init(fp);
161 fr_sof(fp) = fcoe_hdr->fcoe_sof;
162 skb_pull(skb, sizeof(struct fcoe_hdr));
163 transport_len += sizeof(struct fcoe_hdr);
164
165 ft = (struct fcoe_crc_eof *)(skb->data + len -
166 transport_len - sizeof(*ft));
167 fr_eof(fp) = ft->fcoe_eof;
168 skb_trim(skb, len - transport_len - sizeof(*ft));
169 return 0;
170}
171
172static inline int fnic_handle_flogi_resp(struct fnic *fnic,
173 struct fc_frame *fp)
174{
175 u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
176 struct ethhdr *eth_hdr;
177 struct fc_frame_header *fh;
178 int ret = 0;
179 unsigned long flags;
180 struct fc_frame *old_flogi_resp = NULL;
181
182 fh = (struct fc_frame_header *)fr_hdr(fp);
183
184 spin_lock_irqsave(&fnic->fnic_lock, flags);
185
186 if (fnic->state == FNIC_IN_ETH_MODE) {
187
188 /*
189 * Check if oxid matches on taking the lock. A new Flogi
190 * issued by libFC might have changed the fnic cached oxid
191 */
192 if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
193 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
194 "Flogi response oxid not"
195 " matching cached oxid, dropping frame"
196 "\n");
197 ret = -1;
198 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
199 dev_kfree_skb_irq(fp_skb(fp));
200 goto handle_flogi_resp_end;
201 }
202
203 /* Drop older cached flogi response frame, cache this frame */
204 old_flogi_resp = fnic->flogi_resp;
205 fnic->flogi_resp = fp;
206 fnic->flogi_oxid = FC_XID_UNKNOWN;
207
208 /*
209 * This frame is part of the flogi exchange; get the src mac addr
210 * from it. If the src mac is fcoui based, mark the address mode
211 * flag to use the fcoui base for the dst mac addr; otherwise,
212 * store the fcoe gateway addr.
213 */
214 eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
215 memcpy(mac, eth_hdr->h_source, ETH_ALEN);
216
217 if (ntoh24(mac) == FC_FCOE_OUI)
218 fnic->fcoui_mode = 1;
219 else {
220 fnic->fcoui_mode = 0;
221 memcpy(fnic->dest_addr, mac, ETH_ALEN);
222 }
223
224 /*
225 * Except for the Flogi frame, all outbound frames from us have the
226 * Eth Src address as FC_FCOE_OUI | our_sid. The Flogi frame uses
227 * the vnic MAC address as the Eth Src address
228 */
229 fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
230
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic->s_id = ntoh24(fh->fh_d_id);
233
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
236
237 } else {
238 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
239 "Unexpected fnic state %s while"
240 " processing flogi resp\n",
241 fnic_state_to_str(fnic->state));
242 ret = -1;
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
244 dev_kfree_skb_irq(fp_skb(fp));
245 goto handle_flogi_resp_end;
246 }
247
248 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
249
250 /* Drop older cached frame */
251 if (old_flogi_resp)
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp));
253
254 /*
255 * send flogi reg request to firmware, this will put the fnic
256 * in FC mode
257 */
258 ret = fnic_flogi_reg_handler(fnic);
259
260 if (ret < 0) {
261 int free_fp = 1;
262 spin_lock_irqsave(&fnic->fnic_lock, flags);
263 /*
264 * free the frame if some other thread is not
265 * pointing to it
266 */
267 if (fnic->flogi_resp != fp)
268 free_fp = 0;
269 else
270 fnic->flogi_resp = NULL;
271
272 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
273 fnic->state = FNIC_IN_ETH_MODE;
274 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
275 if (free_fp)
276 dev_kfree_skb_irq(fp_skb(fp));
277 }
278
279 handle_flogi_resp_end:
280 return ret;
281}
282
283/* Returns 1 for a response that matches cached flogi oxid */
284static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
285 struct fc_frame *fp)
286{
287 struct fc_frame_header *fh;
288 int ret = 0;
289 u32 f_ctl;
290
291 fh = fc_frame_header_get(fp);
292 f_ctl = ntoh24(fh->fh_f_ctl);
293
294 if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
295 fh->fh_r_ctl == FC_RCTL_ELS_REP &&
296 (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
297 fh->fh_type == FC_TYPE_ELS)
298 ret = 1;
299
300 return ret;
301}
302
303static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
304 *cq_desc, struct vnic_rq_buf *buf,
305 int skipped __attribute__((unused)),
306 void *opaque)
307{
308 struct fnic *fnic = vnic_dev_priv(rq->vdev);
309 struct sk_buff *skb;
310 struct fc_frame *fp;
311 unsigned int eth_hdrs_stripped;
312 u8 type, color, eop, sop, ingress_port, vlan_stripped;
313 u8 fcoe = 0, fcoe_sof, fcoe_eof;
314 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
315 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
316 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
317 u8 fcs_ok = 1, packet_error = 0;
318 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
319 u32 rss_hash;
320 u16 exchange_id, tmpl;
321 u8 sof = 0;
322 u8 eof = 0;
323 u32 fcp_bytes_written = 0;
324 unsigned long flags;
325
326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
327 PCI_DMA_FROMDEVICE);
328 skb = buf->os_buf;
329 buf->os_buf = NULL;
330
331 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
332 if (type == CQ_DESC_TYPE_RQ_FCP) {
333 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
334 &type, &color, &q_number, &completed_index,
335 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
336 &tmpl, &fcp_bytes_written, &sof, &eof,
337 &ingress_port, &packet_error,
338 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
339 &vlan);
340 eth_hdrs_stripped = 1;
341
342 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
344 &type, &color, &q_number, &completed_index,
345 &ingress_port, &fcoe, &eop, &sop,
346 &rss_type, &csum_not_calc, &rss_hash,
347 &bytes_written, &packet_error,
348 &vlan_stripped, &vlan, &checksum,
349 &fcoe_sof, &fcoe_fc_crc_ok,
350 &fcoe_enc_error, &fcoe_eof,
351 &tcp_udp_csum_ok, &udp, &tcp,
352 &ipv4_csum_ok, &ipv6, &ipv4,
353 &ipv4_fragment, &fcs_ok);
354 eth_hdrs_stripped = 0;
355
356 } else {
357 /* wrong CQ type*/
358 shost_printk(KERN_ERR, fnic->lport->host,
359 "fnic rq_cmpl wrong cq type x%x\n", type);
360 goto drop;
361 }
362
363 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
364 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
365 "fnic rq_cmpl fcoe x%x fcsok x%x"
366 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
367 " x%x\n",
368 fcoe, fcs_ok, packet_error,
369 fcoe_fc_crc_ok, fcoe_enc_error);
370 goto drop;
371 }
372
373 if (eth_hdrs_stripped)
374 fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
375 else if (fnic_import_rq_eth_pkt(skb, bytes_written))
376 goto drop;
377
378 fp = (struct fc_frame *)skb;
379
380 /*
381 * If the frame is an ELS response that matches the cached FLOGI OX_ID
382 * and is an accept, issue a flogi_reg_request copy wq request to the
383 * firmware to register the S_ID and determine FC_OUI mode vs. GW mode.
384 */
385 if (is_matching_flogi_resp_frame(fnic, fp)) {
386 if (!eth_hdrs_stripped) {
387 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
388 fnic_handle_flogi_resp(fnic, fp);
389 return;
390 }
391 /*
392 * Recd. Flogi reject. No point registering
393 * with fw, but forward to libFC
394 */
395 goto forward;
396 }
397 goto drop;
398 }
399 if (!eth_hdrs_stripped)
400 goto drop;
401
402forward:
403 spin_lock_irqsave(&fnic->fnic_lock, flags);
404 if (fnic->stop_rx_link_events) {
405 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
406 goto drop;
407 }
408 /* Use fr_flags to indicate whether this is a successful flogi resp or not */
409 fr_flags(fp) = 0;
410 fr_dev(fp) = fnic->lport;
411 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
412
413 skb_queue_tail(&fnic->frame_queue, skb);
414 queue_work(fnic_event_queue, &fnic->frame_work);
415
416 return;
417drop:
418 dev_kfree_skb_irq(skb);
419}
420
421static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
422 struct cq_desc *cq_desc, u8 type,
423 u16 q_number, u16 completed_index,
424 void *opaque)
425{
426 struct fnic *fnic = vnic_dev_priv(vdev);
427
428 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
429 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
430 NULL);
431 return 0;
432}
433
434int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
435{
436 unsigned int tot_rq_work_done = 0, cur_work_done;
437 unsigned int i;
438 int err;
439
440 for (i = 0; i < fnic->rq_count; i++) {
441 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
442 fnic_rq_cmpl_handler_cont,
443 NULL);
444 if (cur_work_done) {
445 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
446 if (err)
447 shost_printk(KERN_ERR, fnic->lport->host,
448 "fnic_alloc_rq_frame cant alloc"
449 " frame\n");
450 }
451 tot_rq_work_done += cur_work_done;
452 }
453
454 return tot_rq_work_done;
455}
456
457/*
458 * This function is called once at init time to allocate and fill RQ
459 * buffers. Subsequently, it is called in the interrupt context after RQ
460 * buffer processing to replenish the buffers in the RQ
461 */
462int fnic_alloc_rq_frame(struct vnic_rq *rq)
463{
464 struct fnic *fnic = vnic_dev_priv(rq->vdev);
465 struct sk_buff *skb;
466 u16 len;
467 dma_addr_t pa;
468
469 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
470 skb = dev_alloc_skb(len);
471 if (!skb) {
472 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
473 "Unable to allocate RQ sk_buff\n");
474 return -ENOMEM;
475 }
476 skb_reset_mac_header(skb);
477 skb_reset_transport_header(skb);
478 skb_reset_network_header(skb);
479 skb_put(skb, len);
480 pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
481 fnic_queue_rq_desc(rq, skb, pa, len);
482 return 0;
483}
484
485void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
486{
487 struct fc_frame *fp = buf->os_buf;
488 struct fnic *fnic = vnic_dev_priv(rq->vdev);
489
490 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
491 PCI_DMA_FROMDEVICE);
492
493 dev_kfree_skb(fp_skb(fp));
494 buf->os_buf = NULL;
495}
496
497static inline int is_flogi_frame(struct fc_frame_header *fh)
498{
499 return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
500}
501
502int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
503{
504 struct vnic_wq *wq = &fnic->wq[0];
505 struct sk_buff *skb;
506 dma_addr_t pa;
507 struct ethhdr *eth_hdr;
508 struct vlan_ethhdr *vlan_hdr;
509 struct fcoe_hdr *fcoe_hdr;
510 struct fc_frame_header *fh;
511 u32 tot_len, eth_hdr_len;
512 int ret = 0;
513 unsigned long flags;
514
515 fh = fc_frame_header_get(fp);
516 skb = fp_skb(fp);
517
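	/*
	 * Without hardware VLAN insertion, build an 802.1Q + FCoE header in
	 * software; otherwise a plain Ethernet + FCoE header is enough and
	 * the VLAN tag is inserted by hardware when the frame is queued.
	 */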
518 if (!fnic->vlan_hw_insert) {
519 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
520 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
521 eth_hdr = (struct ethhdr *)vlan_hdr;
522 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
523 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
524 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
525 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
526 } else {
527 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
528 eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
529 eth_hdr->h_proto = htons(ETH_P_FCOE);
530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
531 }
532
533 if (is_flogi_frame(fh)) {
534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
535 memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
536 } else {
537 if (fnic->fcoui_mode)
538 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
539 else
540 memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
541 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
542 }
543
544 tot_len = skb->len;
545 BUG_ON(tot_len % 4);
546
547 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
548 fcoe_hdr->fcoe_sof = fr_sof(fp);
549 if (FC_FCOE_VER)
550 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
551
552 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
553
554 spin_lock_irqsave(&fnic->wq_lock[0], flags);
555
556 if (!vnic_wq_desc_avail(wq)) {
557 pci_unmap_single(fnic->pdev, pa,
558 tot_len, PCI_DMA_TODEVICE);
559 ret = -1;
560 goto fnic_send_frame_end;
561 }
562
563 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
564 fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
565fnic_send_frame_end:
566 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
567
568 if (ret)
569 dev_kfree_skb_any(fp_skb(fp));
570
571 return ret;
572}
573
574/*
575 * fnic_send
576 * Routine to send a raw frame
577 */
578int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
579{
580 struct fnic *fnic = lport_priv(lp);
581 struct fc_frame_header *fh;
582 int ret = 0;
583 enum fnic_state old_state;
584 unsigned long flags;
585 struct fc_frame *old_flogi = NULL;
586 struct fc_frame *old_flogi_resp = NULL;
587
588 if (fnic->in_remove) {
589 dev_kfree_skb(fp_skb(fp));
590 ret = -1;
591 goto fnic_send_end;
592 }
593
594 fh = fc_frame_header_get(fp);
595 /* If this is not a flogi frame, send it out; this is the common case */
596 if (!is_flogi_frame(fh))
597 return fnic_send_frame(fnic, fp);
598
599 /* Flogi frame, now enter the state machine */
600
601 spin_lock_irqsave(&fnic->fnic_lock, flags);
602again:
603 /* Get any old cached frames, free them after dropping lock */
604 old_flogi = fnic->flogi;
605 fnic->flogi = NULL;
606 old_flogi_resp = fnic->flogi_resp;
607 fnic->flogi_resp = NULL;
608
609 fnic->flogi_oxid = FC_XID_UNKNOWN;
610
611 old_state = fnic->state;
612 switch (old_state) {
613 case FNIC_IN_FC_MODE:
614 case FNIC_IN_ETH_TRANS_FC_MODE:
615 default:
616 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
617 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
619
620 if (old_flogi) {
621 dev_kfree_skb(fp_skb(old_flogi));
622 old_flogi = NULL;
623 }
624 if (old_flogi_resp) {
625 dev_kfree_skb(fp_skb(old_flogi_resp));
626 old_flogi_resp = NULL;
627 }
628
629 ret = fnic_fw_reset_handler(fnic);
630
631 spin_lock_irqsave(&fnic->fnic_lock, flags);
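	/*
	 * The state may have changed while the lock was dropped for the
	 * firmware reset; if so, restart the transition from the new state.
	 */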
632 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
633 goto again;
634 if (ret) {
635 fnic->state = old_state;
636 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
637 dev_kfree_skb(fp_skb(fp));
638 goto fnic_send_end;
639 }
640 old_flogi = fnic->flogi;
641 fnic->flogi = fp;
642 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
643 old_flogi_resp = fnic->flogi_resp;
644 fnic->flogi_resp = NULL;
645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
646 break;
647
648 case FNIC_IN_FC_TRANS_ETH_MODE:
649 /*
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
652 * only when the firmware completes the reset, either with
653 * success or failure. On success, transition to
654 * FNIC_IN_ETH_MODE; on failure, transition back to
655 * FNIC_IN_FC_MODE
656 */
657 fnic->flogi = fp;
658 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
659 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
660 break;
661
662 case FNIC_IN_ETH_MODE:
663 /*
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
666 * state happens only when we receive a flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
671 */
672 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
673 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
674 ret = fnic_send_frame(fnic, fp);
675 break;
676 }
677
678fnic_send_end:
679 if (old_flogi)
680 dev_kfree_skb(fp_skb(old_flogi));
681 if (old_flogi_resp)
682 dev_kfree_skb(fp_skb(old_flogi_resp));
683 return ret;
684}
685
686static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
687 struct cq_desc *cq_desc,
688 struct vnic_wq_buf *buf, void *opaque)
689{
690 struct sk_buff *skb = buf->os_buf;
691 struct fc_frame *fp = (struct fc_frame *)skb;
692 struct fnic *fnic = vnic_dev_priv(wq->vdev);
693
694 pci_unmap_single(fnic->pdev, buf->dma_addr,
695 buf->len, PCI_DMA_TODEVICE);
696 dev_kfree_skb_irq(fp_skb(fp));
697 buf->os_buf = NULL;
698}
699
700static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
701 struct cq_desc *cq_desc, u8 type,
702 u16 q_number, u16 completed_index,
703 void *opaque)
704{
705 struct fnic *fnic = vnic_dev_priv(vdev);
706 unsigned long flags;
707
708 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
709 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
710 fnic_wq_complete_frame_send, NULL);
711 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
712
713 return 0;
714}
715
716int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
717{
718 unsigned int wq_work_done = 0;
719 unsigned int i;
720
721 for (i = 0; i < fnic->raw_wq_count; i++) {
722 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
723 work_to_do,
724 fnic_wq_cmpl_handler_cont,
725 NULL);
726 }
727
728 return wq_work_done;
729}
730
731
732void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
733{
734 struct fc_frame *fp = buf->os_buf;
735 struct fnic *fnic = vnic_dev_priv(wq->vdev);
736
737 pci_unmap_single(fnic->pdev, buf->dma_addr,
738 buf->len, PCI_DMA_TODEVICE);
739
740 dev_kfree_skb(fp_skb(fp));
741 buf->os_buf = NULL;
742}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 00000000000..f0b896988cd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_IO_H_
19#define _FNIC_IO_H_
20
21#include <scsi/fc/fc_fcp.h>
22
23#define FNIC_DFLT_SG_DESC_CNT 32
24#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
25#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
26
27struct host_sg_desc {
28 __le64 addr;
29 __le32 len;
30 u32 _resvd;
31};
32
33struct fnic_dflt_sgl_list {
34 struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
35};
36
37struct fnic_sgl_list {
38 struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
39};
40
41enum fnic_sgl_list_type {
42 FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
43 FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
44 FNIC_SGL_NUM_CACHES /* number of sgl caches */
45};
46
47enum fnic_ioreq_state {
48 FNIC_IOREQ_CMD_PENDING = 0,
49 FNIC_IOREQ_ABTS_PENDING,
50 FNIC_IOREQ_ABTS_COMPLETE,
51 FNIC_IOREQ_CMD_COMPLETE,
52};
53
54struct fnic_io_req {
55 struct host_sg_desc *sgl_list; /* sgl list */
56 void *sgl_list_alloc; /* sgl list address used for free */
57 dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
58 dma_addr_t sgl_list_pa; /* dma address for sgl list */
59 u16 sgl_cnt;
60 u8 sgl_type; /* device DMA descriptor list type */
61 u8 io_completed:1; /* set to 1 when fw completes IO */
62 u32 port_id; /* remote port DID */
63 struct completion *abts_done; /* completion for abts */
64 struct completion *dr_done; /* completion for device reset */
65};
66
67#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 00000000000..2b3064828ae
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/errno.h>
20#include <linux/pci.h>
21#include <linux/interrupt.h>
22#include <scsi/libfc.h>
23#include <scsi/fc_frame.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26#include "vnic_stats.h"
27#include "fnic_io.h"
28#include "fnic.h"
29
30static irqreturn_t fnic_isr_legacy(int irq, void *data)
31{
32 struct fnic *fnic = data;
33 u32 pba;
34 unsigned long work_done = 0;
35
36 pba = vnic_intr_legacy_pba(fnic->legacy_pba);
37 if (!pba)
38 return IRQ_NONE;
39
40 if (pba & (1 << FNIC_INTX_NOTIFY)) {
41 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
42 fnic_handle_link_event(fnic);
43 }
44
45 if (pba & (1 << FNIC_INTX_ERR)) {
46 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
47 fnic_log_q_error(fnic);
48 }
49
50 if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
51 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
52 work_done += fnic_wq_cmpl_handler(fnic, 4);
53 work_done += fnic_rq_cmpl_handler(fnic, 4);
54
55 vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
56 work_done,
57 1 /* unmask intr */,
58 1 /* reset intr timer */);
59 }
60
61 return IRQ_HANDLED;
62}
63
64static irqreturn_t fnic_isr_msi(int irq, void *data)
65{
66 struct fnic *fnic = data;
67 unsigned long work_done = 0;
68
69 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
70 work_done += fnic_wq_cmpl_handler(fnic, 4);
71 work_done += fnic_rq_cmpl_handler(fnic, 4);
72
73 vnic_intr_return_credits(&fnic->intr[0],
74 work_done,
75 1 /* unmask intr */,
76 1 /* reset intr timer */);
77
78 return IRQ_HANDLED;
79}
80
81static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
82{
83 struct fnic *fnic = data;
84 unsigned long rq_work_done = 0;
85
86 rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
87 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
88 rq_work_done,
89 1 /* unmask intr */,
90 1 /* reset intr timer */);
91
92 return IRQ_HANDLED;
93}
94
95static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
96{
97 struct fnic *fnic = data;
98 unsigned long wq_work_done = 0;
99
100 wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
101 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
102 wq_work_done,
103 1 /* unmask intr */,
104 1 /* reset intr timer */);
105 return IRQ_HANDLED;
106}
107
108static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
109{
110 struct fnic *fnic = data;
111 unsigned long wq_copy_work_done = 0;
112
113 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
114 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
115 wq_copy_work_done,
116 1 /* unmask intr */,
117 1 /* reset intr timer */);
118 return IRQ_HANDLED;
119}
120
121static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
122{
123 struct fnic *fnic = data;
124
125 vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
126 fnic_log_q_error(fnic);
127 fnic_handle_link_event(fnic);
128
129 return IRQ_HANDLED;
130}
131
132void fnic_free_intr(struct fnic *fnic)
133{
134 int i;
135
136 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
137 case VNIC_DEV_INTR_MODE_INTX:
138 case VNIC_DEV_INTR_MODE_MSI:
139 free_irq(fnic->pdev->irq, fnic);
140 break;
141
142 case VNIC_DEV_INTR_MODE_MSIX:
143 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
144 if (fnic->msix[i].requested)
145 free_irq(fnic->msix_entry[i].vector,
146 fnic->msix[i].devid);
147 break;
148
149 default:
150 break;
151 }
152}
153
154int fnic_request_intr(struct fnic *fnic)
155{
156 int err = 0;
157 int i;
158
159 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
160
161 case VNIC_DEV_INTR_MODE_INTX:
162 err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
163 IRQF_SHARED, DRV_NAME, fnic);
164 break;
165
166 case VNIC_DEV_INTR_MODE_MSI:
167 err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
168 0, fnic->name, fnic);
169 break;
170
171 case VNIC_DEV_INTR_MODE_MSIX:
172
173 sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
174 "%.11s-fcs-rq", fnic->name);
175 fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
176 fnic->msix[FNIC_MSIX_RQ].devid = fnic;
177
178 sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
179 "%.11s-fcs-wq", fnic->name);
180 fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
181 fnic->msix[FNIC_MSIX_WQ].devid = fnic;
182
183 sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
184 "%.11s-scsi-wq", fnic->name);
185 fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
186 fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
187
188 sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
189 "%.11s-err-notify", fnic->name);
190 fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
191 fnic_isr_msix_err_notify;
192 fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
193
194 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
195 err = request_irq(fnic->msix_entry[i].vector,
196 fnic->msix[i].isr, 0,
197 fnic->msix[i].devname,
198 fnic->msix[i].devid);
199 if (err) {
200 shost_printk(KERN_ERR, fnic->lport->host,
201 "MSIX: request_irq"
202 " failed %d\n", err);
203 fnic_free_intr(fnic);
204 break;
205 }
206 fnic->msix[i].requested = 1;
207 }
208 break;
209
210 default:
211 break;
212 }
213
214 return err;
215}
216
217int fnic_set_intr_mode(struct fnic *fnic)
218{
219 unsigned int n = ARRAY_SIZE(fnic->rq);
220 unsigned int m = ARRAY_SIZE(fnic->wq);
221 unsigned int o = ARRAY_SIZE(fnic->wq_copy);
222 unsigned int i;
223
224 /*
225 * Set interrupt mode (INTx, MSI, MSI-X) depending on
226 * the system capabilities.
227 *
228 * Try MSI-X first
229 *
230 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
231 * (last INTR is used for WQ/RQ errors and notification area)
232 */
233
234 BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
235 for (i = 0; i < n + m + o + 1; i++)
236 fnic->msix_entry[i].entry = i;
237
238 if (fnic->rq_count >= n &&
239 fnic->raw_wq_count >= m &&
240 fnic->wq_copy_count >= o &&
241 fnic->cq_count >= n + m + o) {
242 if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
243 n + m + o + 1)) {
244 fnic->rq_count = n;
245 fnic->raw_wq_count = m;
246 fnic->wq_copy_count = o;
247 fnic->wq_count = m + o;
248 fnic->cq_count = n + m + o;
249 fnic->intr_count = n + m + o + 1;
250 fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
251
252 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
253 "Using MSI-X Interrupts\n");
254 vnic_dev_set_intr_mode(fnic->vdev,
255 VNIC_DEV_INTR_MODE_MSIX);
256 return 0;
257 }
258 }
259
260 /*
261 * Next try MSI
262 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
263 */
264 if (fnic->rq_count >= 1 &&
265 fnic->raw_wq_count >= 1 &&
266 fnic->wq_copy_count >= 1 &&
267 fnic->cq_count >= 3 &&
268 fnic->intr_count >= 1 &&
269 !pci_enable_msi(fnic->pdev)) {
270
271 fnic->rq_count = 1;
272 fnic->raw_wq_count = 1;
273 fnic->wq_copy_count = 1;
274 fnic->wq_count = 2;
275 fnic->cq_count = 3;
276 fnic->intr_count = 1;
277 fnic->err_intr_offset = 0;
278
279 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
280 "Using MSI Interrupts\n");
281 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
282
283 return 0;
284 }
285
286 /*
287 * Next try INTx
288 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
289 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
290 * 1 INTR for notification area
291 */
292
293 if (fnic->rq_count >= 1 &&
294 fnic->raw_wq_count >= 1 &&
295 fnic->wq_copy_count >= 1 &&
296 fnic->cq_count >= 3 &&
297 fnic->intr_count >= 3) {
298
299 fnic->rq_count = 1;
300 fnic->raw_wq_count = 1;
301 fnic->wq_copy_count = 1;
302 fnic->cq_count = 3;
303 fnic->intr_count = 3;
304
305 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
306 "Using Legacy Interrupts\n");
307 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
308
309 return 0;
310 }
311
312 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
313
314 return -EINVAL;
315}
316
317void fnic_clear_intr_mode(struct fnic *fnic)
318{
319 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
320 case VNIC_DEV_INTR_MODE_MSIX:
321 pci_disable_msix(fnic->pdev);
322 break;
323 case VNIC_DEV_INTR_MODE_MSI:
324 pci_disable_msi(fnic->pdev);
325 break;
326 default:
327 break;
328 }
329
330 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
331}
332
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 00000000000..a84072865fc
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,943 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/pci.h>
24#include <linux/skbuff.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/workqueue.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport.h>
30#include <scsi/scsi_transport_fc.h>
31#include <scsi/scsi_tcq.h>
32#include <scsi/libfc.h>
33#include <scsi/fc_frame.h>
34
35#include "vnic_dev.h"
36#include "vnic_intr.h"
37#include "vnic_stats.h"
38#include "fnic_io.h"
39#include "fnic.h"
40
41#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
42
43/* Timer to poll notification area for events. Used for MSI interrupts */
44#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
45
46static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
47static struct kmem_cache *fnic_io_req_cache;
48LIST_HEAD(fnic_list);
49DEFINE_SPINLOCK(fnic_list_lock);
50
51/* Supported devices by fnic module */
52static struct pci_device_id fnic_id_table[] = {
53 { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
54 { 0, }
55};
56
57MODULE_DESCRIPTION(DRV_DESCRIPTION);
58MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
59 "Joseph R. Eykholt <jeykholt@cisco.com>");
60MODULE_LICENSE("GPL v2");
61MODULE_VERSION(DRV_VERSION);
62MODULE_DEVICE_TABLE(pci, fnic_id_table);
63
64unsigned int fnic_log_level;
65module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
67
68
69static struct libfc_function_template fnic_transport_template = {
70 .frame_send = fnic_send,
71 .fcp_abort_io = fnic_empty_scsi_cleanup,
72 .fcp_cleanup = fnic_empty_scsi_cleanup,
73 .exch_mgr_reset = fnic_exch_mgr_reset
74};
75
76static int fnic_slave_alloc(struct scsi_device *sdev)
77{
78 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
79 struct fc_lport *lp = shost_priv(sdev->host);
80 struct fnic *fnic = lport_priv(lp);
81
82 sdev->tagged_supported = 1;
83
84 if (!rport || fc_remote_port_chkready(rport))
85 return -ENXIO;
86
87 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
88 rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
89
90 return 0;
91}
92
93static struct scsi_host_template fnic_host_template = {
94 .module = THIS_MODULE,
95 .name = DRV_NAME,
96 .queuecommand = fnic_queuecommand,
97 .eh_abort_handler = fnic_abort_cmd,
98 .eh_device_reset_handler = fnic_device_reset,
99 .eh_host_reset_handler = fnic_host_reset,
100 .slave_alloc = fnic_slave_alloc,
101 .change_queue_depth = fc_change_queue_depth,
102 .change_queue_type = fc_change_queue_type,
103 .this_id = -1,
104 .cmd_per_lun = 3,
105 .can_queue = FNIC_MAX_IO_REQ,
106 .use_clustering = ENABLE_CLUSTERING,
107 .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
108 .max_sectors = 0xffff,
109 .shost_attrs = fnic_attrs,
110};
111
112static void fnic_get_host_speed(struct Scsi_Host *shost);
113static struct scsi_transport_template *fnic_fc_transport;
114static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
115
116static struct fc_function_template fnic_fc_functions = {
117
118 .show_host_node_name = 1,
119 .show_host_port_name = 1,
120 .show_host_supported_classes = 1,
121 .show_host_supported_fc4s = 1,
122 .show_host_active_fc4s = 1,
123 .show_host_maxframe_size = 1,
124 .show_host_port_id = 1,
125 .show_host_supported_speeds = 1,
126 .get_host_speed = fnic_get_host_speed,
127 .show_host_speed = 1,
128 .show_host_port_type = 1,
129 .get_host_port_state = fc_get_host_port_state,
130 .show_host_port_state = 1,
131 .show_host_symbolic_name = 1,
132 .show_rport_maxframe_size = 1,
133 .show_rport_supported_classes = 1,
134 .show_host_fabric_name = 1,
135 .show_starget_node_name = 1,
136 .show_starget_port_name = 1,
137 .show_starget_port_id = 1,
138 .show_rport_dev_loss_tmo = 1,
139 .issue_fc_host_lip = fnic_reset,
140 .get_fc_host_stats = fnic_get_stats,
141 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
142 .terminate_rport_io = fnic_terminate_rport_io,
143};
144
145static void fnic_get_host_speed(struct Scsi_Host *shost)
146{
147 struct fc_lport *lp = shost_priv(shost);
148 struct fnic *fnic = lport_priv(lp);
149 u32 port_speed = vnic_dev_port_speed(fnic->vdev);
150
151 /* Add in other values as they get defined in fw */
152 switch (port_speed) {
153 case 10000:
154 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
155 break;
156 default:
157 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
158 break;
159 }
160}
161
162static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
163{
164 int ret;
165 struct fc_lport *lp = shost_priv(host);
166 struct fnic *fnic = lport_priv(lp);
167 struct fc_host_statistics *stats = &lp->host_stats;
168 struct vnic_stats *vs;
169 unsigned long flags;
170
171 if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
172 return stats;
173 fnic->stats_time = jiffies;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178
179 if (ret) {
180 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
181 "fnic: Get vnic stats failed"
182 " 0x%x", ret);
183 return stats;
184 }
185 vs = fnic->stats;
186 stats->tx_frames = vs->tx.tx_unicast_frames_ok;
187 stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
188 stats->rx_frames = vs->rx.rx_unicast_frames_ok;
189 stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
190 stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
191 stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
192 stats->invalid_crc_count = vs->rx.rx_crc_errors;
193 stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
194 stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
195 stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
196
197 return stats;
198}
199
200void fnic_log_q_error(struct fnic *fnic)
201{
202 unsigned int i;
203 u32 error_status;
204
205 for (i = 0; i < fnic->raw_wq_count; i++) {
206 error_status = ioread32(&fnic->wq[i].ctrl->error_status);
207 if (error_status)
208 shost_printk(KERN_ERR, fnic->lport->host,
209 "WQ[%d] error_status"
210 " %d\n", i, error_status);
211 }
212
213 for (i = 0; i < fnic->rq_count; i++) {
214 error_status = ioread32(&fnic->rq[i].ctrl->error_status);
215 if (error_status)
216 shost_printk(KERN_ERR, fnic->lport->host,
217 "RQ[%d] error_status"
218 " %d\n", i, error_status);
219 }
220
221 for (i = 0; i < fnic->wq_copy_count; i++) {
222 error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
223 if (error_status)
224 shost_printk(KERN_ERR, fnic->lport->host,
225 "CWQ[%d] error_status"
226 " %d\n", i, error_status);
227 }
228}
229
230void fnic_handle_link_event(struct fnic *fnic)
231{
232 unsigned long flags;
233
234 spin_lock_irqsave(&fnic->fnic_lock, flags);
235 if (fnic->stop_rx_link_events) {
236 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
237 return;
238 }
239 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
240
241 queue_work(fnic_event_queue, &fnic->link_work);
242
243}
244
245static int fnic_notify_set(struct fnic *fnic)
246{
247 int err;
248
249 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
250 case VNIC_DEV_INTR_MODE_INTX:
251 err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
252 break;
253 case VNIC_DEV_INTR_MODE_MSI:
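		/*
		 * For MSI there is no dedicated notify interrupt; link
		 * events are picked up by the periodic notify timer instead.
		 */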
254 err = vnic_dev_notify_set(fnic->vdev, -1);
255 break;
256 case VNIC_DEV_INTR_MODE_MSIX:
257 err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
258 break;
259 default:
260 shost_printk(KERN_ERR, fnic->lport->host,
261 "Interrupt mode should be set up"
262 " before devcmd notify set %d\n",
263 vnic_dev_get_intr_mode(fnic->vdev));
264 err = -1;
265 break;
266 }
267
268 return err;
269}
270
271static void fnic_notify_timer(unsigned long data)
272{
273 struct fnic *fnic = (struct fnic *)data;
274
275 fnic_handle_link_event(fnic);
276 mod_timer(&fnic->notify_timer,
277 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
278}
279
280static void fnic_notify_timer_start(struct fnic *fnic)
281{
282 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
283 case VNIC_DEV_INTR_MODE_MSI:
284 /*
285 * Schedule first timeout immediately. The driver is
286 * initialized and ready to look for link-up notifications
287 */
288 mod_timer(&fnic->notify_timer, jiffies);
289 break;
290 default:
291 /* Using intr for notification for INTx/MSI-X */
292 break;
293 };
294}
295
296static int fnic_dev_wait(struct vnic_dev *vdev,
297 int (*start)(struct vnic_dev *, int),
298 int (*finished)(struct vnic_dev *, int *),
299 int arg)
300{
301 unsigned long time;
302 int done;
303 int err;
304
305 err = start(vdev, arg);
306 if (err)
307 return err;
308
309 /* Wait for func to complete...2 seconds max */
310 time = jiffies + (HZ * 2);
311 do {
312 err = finished(vdev, &done);
313 if (err)
314 return err;
315 if (done)
316 return 0;
317 schedule_timeout_uninterruptible(HZ / 10);
318 } while (time_after(time, jiffies));
319
320 return -ETIMEDOUT;
321}
322
323static int fnic_cleanup(struct fnic *fnic)
324{
325 unsigned int i;
326 int err;
327 unsigned long flags;
328 struct fc_frame *flogi = NULL;
329 struct fc_frame *flogi_resp = NULL;
330
331 vnic_dev_disable(fnic->vdev);
332 for (i = 0; i < fnic->intr_count; i++)
333 vnic_intr_mask(&fnic->intr[i]);
334
335 for (i = 0; i < fnic->rq_count; i++) {
336 err = vnic_rq_disable(&fnic->rq[i]);
337 if (err)
338 return err;
339 }
340 for (i = 0; i < fnic->raw_wq_count; i++) {
341 err = vnic_wq_disable(&fnic->wq[i]);
342 if (err)
343 return err;
344 }
345 for (i = 0; i < fnic->wq_copy_count; i++) {
346 err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
347 if (err)
348 return err;
349 }
350
351 /* Clean up completed IOs and FCS frames */
352 fnic_wq_copy_cmpl_handler(fnic, -1);
353 fnic_wq_cmpl_handler(fnic, -1);
354 fnic_rq_cmpl_handler(fnic, -1);
355
356 /* Clean up the IOs and FCS frames that have not completed */
357 for (i = 0; i < fnic->raw_wq_count; i++)
358 vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
359 for (i = 0; i < fnic->rq_count; i++)
360 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
361 for (i = 0; i < fnic->wq_copy_count; i++)
362 vnic_wq_copy_clean(&fnic->wq_copy[i],
363 fnic_wq_copy_cleanup_handler);
364
365 for (i = 0; i < fnic->cq_count; i++)
366 vnic_cq_clean(&fnic->cq[i]);
367 for (i = 0; i < fnic->intr_count; i++)
368 vnic_intr_clean(&fnic->intr[i]);
369
370 /*
371 * Remove cached flogi and flogi resp frames if any
372 * These frames are not in any queue, and therefore queue
373 * cleanup does not clean them. So clean them explicitly
374 */
375 spin_lock_irqsave(&fnic->fnic_lock, flags);
376 flogi = fnic->flogi;
377 fnic->flogi = NULL;
378 flogi_resp = fnic->flogi_resp;
379 fnic->flogi_resp = NULL;
380 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
381
382 if (flogi)
383 dev_kfree_skb(fp_skb(flogi));
384
385 if (flogi_resp)
386 dev_kfree_skb(fp_skb(flogi_resp));
387
388 mempool_destroy(fnic->io_req_pool);
389 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
390 mempool_destroy(fnic->io_sgl_pool[i]);
391
392 return 0;
393}
394
395static void fnic_iounmap(struct fnic *fnic)
396{
397 if (fnic->bar0.vaddr)
398 iounmap(fnic->bar0.vaddr);
399}
400
401/*
402 * Allocate element for mempools requiring GFP_DMA flag.
403 * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
404 */
405static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
406{
407 struct kmem_cache *mem = pool_data;
408
409 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
410}
411
412static int __devinit fnic_probe(struct pci_dev *pdev,
413 const struct pci_device_id *ent)
414{
415 struct Scsi_Host *host;
416 struct fc_lport *lp;
417 struct fnic *fnic;
418 mempool_t *pool;
419 int err;
420 int i;
421 unsigned long flags;
422
423 /*
424 * Allocate SCSI Host and set up association between host,
425 * local port, and fnic
426 */
427 host = scsi_host_alloc(&fnic_host_template,
428 sizeof(struct fc_lport) + sizeof(struct fnic));
429 if (!host) {
430 printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
431 err = -ENOMEM;
432 goto err_out;
433 }
434 lp = shost_priv(host);
435 lp->host = host;
436 fnic = lport_priv(lp);
437 fnic->lport = lp;
438
439 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
440 host->host_no);
441
442 host->transportt = fnic_fc_transport;
443
444 err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
445 if (err) {
446 shost_printk(KERN_ERR, fnic->lport->host,
447 "Unable to alloc shared tag map\n");
448 goto err_out_free_hba;
449 }
450
451 /* Setup PCI resources */
452 pci_set_drvdata(pdev, fnic);
453
454 fnic->pdev = pdev;
455
456 err = pci_enable_device(pdev);
457 if (err) {
458 shost_printk(KERN_ERR, fnic->lport->host,
459 "Cannot enable PCI device, aborting.\n");
460 goto err_out_free_hba;
461 }
462
463 err = pci_request_regions(pdev, DRV_NAME);
464 if (err) {
465 shost_printk(KERN_ERR, fnic->lport->host,
466 "Cannot enable PCI resources, aborting\n");
467 goto err_out_disable_device;
468 }
469
470 pci_set_master(pdev);
471
472 /* Query the PCI controller on the system for the DMA addressing
473 * limitation for the device. Try 40-bit first, and fall back
474 * to 32-bit if that fails.
475 */
476 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
477 if (err) {
478 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
479 if (err) {
480 shost_printk(KERN_ERR, fnic->lport->host,
481 "No usable DMA configuration "
482 "aborting\n");
483 goto err_out_release_regions;
484 }
485 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
486 if (err) {
487 shost_printk(KERN_ERR, fnic->lport->host,
488 "Unable to obtain 32-bit DMA "
489 "for consistent allocations, aborting.\n");
490 goto err_out_release_regions;
491 }
492 } else {
493 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
494 if (err) {
495 shost_printk(KERN_ERR, fnic->lport->host,
496 "Unable to obtain 40-bit DMA "
497 "for consistent allocations, aborting.\n");
498 goto err_out_release_regions;
499 }
500 }
501
502 /* Map vNIC resources from BAR0 */
503 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
504 shost_printk(KERN_ERR, fnic->lport->host,
505 "BAR0 not memory-map'able, aborting.\n");
506 err = -ENODEV;
507 goto err_out_release_regions;
508 }
509
510 fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
511 fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
512 fnic->bar0.len = pci_resource_len(pdev, 0);
513
514 if (!fnic->bar0.vaddr) {
515 shost_printk(KERN_ERR, fnic->lport->host,
516 "Cannot memory-map BAR0 res hdr, "
517 "aborting.\n");
518 err = -ENODEV;
519 goto err_out_release_regions;
520 }
521
522 fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
523 if (!fnic->vdev) {
524 shost_printk(KERN_ERR, fnic->lport->host,
525 "vNIC registration failed, "
526 "aborting.\n");
527 err = -ENODEV;
528 goto err_out_iounmap;
529 }
530
531 err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
532 vnic_dev_open_done, 0);
533 if (err) {
534 shost_printk(KERN_ERR, fnic->lport->host,
535 "vNIC dev open failed, aborting.\n");
536 goto err_out_vnic_unregister;
537 }
538
539 err = vnic_dev_init(fnic->vdev, 0);
540 if (err) {
541 shost_printk(KERN_ERR, fnic->lport->host,
542 "vNIC dev init failed, aborting.\n");
543 goto err_out_dev_close;
544 }
545
546 err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
547 if (err) {
548 shost_printk(KERN_ERR, fnic->lport->host,
549 "vNIC get MAC addr failed \n");
550 goto err_out_dev_close;
551 }
552
553 /* Get vNIC configuration */
554 err = fnic_get_vnic_config(fnic);
555 if (err) {
556 shost_printk(KERN_ERR, fnic->lport->host,
557 "Get vNIC configuration failed, "
558 "aborting.\n");
559 goto err_out_dev_close;
560 }
561 host->max_lun = fnic->config.luns_per_tgt;
562 host->max_id = FNIC_MAX_FCP_TARGET;
563
564 fnic_get_res_counts(fnic);
565
566 err = fnic_set_intr_mode(fnic);
567 if (err) {
568 shost_printk(KERN_ERR, fnic->lport->host,
569 "Failed to set intr mode, "
570 "aborting.\n");
571 goto err_out_dev_close;
572 }
573
574 err = fnic_request_intr(fnic);
575 if (err) {
576 shost_printk(KERN_ERR, fnic->lport->host,
577 "Unable to request irq.\n");
578 goto err_out_clear_intr;
579 }
580
581 err = fnic_alloc_vnic_resources(fnic);
582 if (err) {
583 shost_printk(KERN_ERR, fnic->lport->host,
584 "Failed to alloc vNIC resources, "
585 "aborting.\n");
586 goto err_out_free_intr;
587 }
588
589
590 /* initialize all fnic locks */
591 spin_lock_init(&fnic->fnic_lock);
592
593 for (i = 0; i < FNIC_WQ_MAX; i++)
594 spin_lock_init(&fnic->wq_lock[i]);
595
596 for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
597 spin_lock_init(&fnic->wq_copy_lock[i]);
598 fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
599 fnic->fw_ack_recd[i] = 0;
600 fnic->fw_ack_index[i] = -1;
601 }
602
603 for (i = 0; i < FNIC_IO_LOCKS; i++)
604 spin_lock_init(&fnic->io_req_lock[i]);
605
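	/*
	 * Each mempool keeps a reserve of two elements so that I/O request
	 * and SGL allocation can make progress under memory pressure.
	 */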
606 fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
607 if (!fnic->io_req_pool)
608 goto err_out_free_resources;
609
610 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
611 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
612 if (!pool)
613 goto err_out_free_ioreq_pool;
614 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
615
616 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
617 fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
618 if (!pool)
619 goto err_out_free_dflt_pool;
620 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
621
622 /* setup vlan config, hw inserts vlan header */
623 fnic->vlan_hw_insert = 1;
624 fnic->vlan_id = 0;
625
626 fnic->flogi_oxid = FC_XID_UNKNOWN;
627 fnic->flogi = NULL;
628 fnic->flogi_resp = NULL;
629 fnic->state = FNIC_IN_FC_MODE;
630
631 /* Enable hardware stripping of vlan header on ingress */
632 fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
633
634 /* Setup notification buffer area */
635 err = fnic_notify_set(fnic);
636 if (err) {
637 shost_printk(KERN_ERR, fnic->lport->host,
638 "Failed to alloc notify buffer, aborting.\n");
639 goto err_out_free_max_pool;
640 }
641
642 /* Setup notify timer when using MSI interrupts */
643 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
644 setup_timer(&fnic->notify_timer,
645 fnic_notify_timer, (unsigned long)fnic);
646
647 /* allocate RQ buffers and post them to RQ*/
648 for (i = 0; i < fnic->rq_count; i++) {
649 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
650 if (err) {
651 shost_printk(KERN_ERR, fnic->lport->host,
652 "fnic_alloc_rq_frame can't alloc "
653 "frame\n");
654 goto err_out_free_rq_buf;
655 }
656 }
657
658 /*
659 * Initialization done with PCI system, hardware, firmware.
660 * Add host to SCSI
661 */
662 err = scsi_add_host(lp->host, &pdev->dev);
663 if (err) {
664 shost_printk(KERN_ERR, fnic->lport->host,
665 "fnic: scsi_add_host failed...exiting\n");
666 goto err_out_free_rq_buf;
667 }
668
669 /* Start local port initialization */
670
671 lp->link_up = 0;
672 lp->tt = fnic_transport_template;
673
674 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
675 FCPIO_HOST_EXCH_RANGE_START,
676 FCPIO_HOST_EXCH_RANGE_END);
677 if (!lp->emp) {
678 err = -ENOMEM;
679 goto err_out_remove_scsi_host;
680 }
681
682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries;
684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
685 FCP_SPPF_CONF_COMPL);
686 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
687 lp->service_params |= FCP_SPPF_RETRY;
688
689 lp->boot_time = jiffies;
690 lp->e_d_tov = fnic->config.ed_tov;
691 lp->r_a_tov = fnic->config.ra_tov;
692 lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
693 fc_set_wwnn(lp, fnic->config.node_wwn);
694 fc_set_wwpn(lp, fnic->config.port_wwn);
695
696 fc_exch_init(lp);
697 fc_lport_init(lp);
698 fc_elsct_init(lp);
699 fc_rport_init(lp);
700 fc_disc_init(lp);
701
702 fc_lport_config(lp);
703
704 if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
705 sizeof(struct fc_frame_header))) {
706 err = -EINVAL;
707 goto err_out_free_exch_mgr;
708 }
709 fc_host_maxframe_size(lp->host) = lp->mfs;
710
711 sprintf(fc_host_symbolic_name(lp->host),
712 DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
713
714 spin_lock_irqsave(&fnic_list_lock, flags);
715 list_add_tail(&fnic->list, &fnic_list);
716 spin_unlock_irqrestore(&fnic_list_lock, flags);
717
718 INIT_WORK(&fnic->link_work, fnic_handle_link);
719 INIT_WORK(&fnic->frame_work, fnic_handle_frame);
720 skb_queue_head_init(&fnic->frame_queue);
721
722 /* Enable all queues */
723 for (i = 0; i < fnic->raw_wq_count; i++)
724 vnic_wq_enable(&fnic->wq[i]);
725 for (i = 0; i < fnic->rq_count; i++)
726 vnic_rq_enable(&fnic->rq[i]);
727 for (i = 0; i < fnic->wq_copy_count; i++)
728 vnic_wq_copy_enable(&fnic->wq_copy[i]);
729
730 fc_fabric_login(lp);
731
732 vnic_dev_enable(fnic->vdev);
733 for (i = 0; i < fnic->intr_count; i++)
734 vnic_intr_unmask(&fnic->intr[i]);
735
736 fnic_notify_timer_start(fnic);
737
738 return 0;
739
740err_out_free_exch_mgr:
741 fc_exch_mgr_free(lp->emp);
742err_out_remove_scsi_host:
743 fc_remove_host(fnic->lport->host);
744 scsi_remove_host(fnic->lport->host);
745err_out_free_rq_buf:
746 for (i = 0; i < fnic->rq_count; i++)
747 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
748 vnic_dev_notify_unset(fnic->vdev);
749err_out_free_max_pool:
750 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
751err_out_free_dflt_pool:
752 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
753err_out_free_ioreq_pool:
754 mempool_destroy(fnic->io_req_pool);
755err_out_free_resources:
756 fnic_free_vnic_resources(fnic);
757err_out_free_intr:
758 fnic_free_intr(fnic);
759err_out_clear_intr:
760 fnic_clear_intr_mode(fnic);
761err_out_dev_close:
762 vnic_dev_close(fnic->vdev);
763err_out_vnic_unregister:
764 vnic_dev_unregister(fnic->vdev);
765err_out_iounmap:
766 fnic_iounmap(fnic);
767err_out_release_regions:
768 pci_release_regions(pdev);
769err_out_disable_device:
770 pci_disable_device(pdev);
771err_out_free_hba:
772 scsi_host_put(lp->host);
773err_out:
774 return err;
775}
776
777static void __devexit fnic_remove(struct pci_dev *pdev)
778{
779 struct fnic *fnic = pci_get_drvdata(pdev);
780 unsigned long flags;
781
782 /*
783 * Mark state so that the workqueue thread stops forwarding
784 * received frames and link events to the local port. ISR and
785 * other threads that can queue work items will also stop
786 * creating work items on the fnic workqueue
787 */
788 spin_lock_irqsave(&fnic->fnic_lock, flags);
789 fnic->stop_rx_link_events = 1;
790 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
791
792 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
793 del_timer_sync(&fnic->notify_timer);
794
795 /*
796 * Flush the fnic event queue. After this call, there should
797 * be no event queued for this fnic device in the workqueue
798 */
799 flush_workqueue(fnic_event_queue);
800 skb_queue_purge(&fnic->frame_queue);
801
802 /*
803 * Log off the fabric. This stops all remote ports and the dns
804 * port, and flushes all rport, disc, and lport work before
805 * returning
806 */
807 fc_fabric_logoff(fnic->lport);
808
809 spin_lock_irqsave(&fnic->fnic_lock, flags);
810 fnic->in_remove = 1;
811 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
812
813 fc_lport_destroy(fnic->lport);
814
815 /*
816 * This stops the fnic device, masks all interrupts. Completed
817 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
818 * cleaned up
819 */
820 fnic_cleanup(fnic);
821
822 BUG_ON(!skb_queue_empty(&fnic->frame_queue));
823
824 spin_lock_irqsave(&fnic_list_lock, flags);
825 list_del(&fnic->list);
826 spin_unlock_irqrestore(&fnic_list_lock, flags);
827
828 fc_remove_host(fnic->lport->host);
829 scsi_remove_host(fnic->lport->host);
830 fc_exch_mgr_free(fnic->lport->emp);
831 vnic_dev_notify_unset(fnic->vdev);
832 fnic_free_vnic_resources(fnic);
833 fnic_free_intr(fnic);
834 fnic_clear_intr_mode(fnic);
835 vnic_dev_close(fnic->vdev);
836 vnic_dev_unregister(fnic->vdev);
837 fnic_iounmap(fnic);
838 pci_release_regions(pdev);
839 pci_disable_device(pdev);
840 pci_set_drvdata(pdev, NULL);
841 scsi_host_put(fnic->lport->host);
842}
843
844static struct pci_driver fnic_driver = {
845 .name = DRV_NAME,
846 .id_table = fnic_id_table,
847 .probe = fnic_probe,
848 .remove = __devexit_p(fnic_remove),
849};
850
851static int __init fnic_init_module(void)
852{
853 size_t len;
854 int err = 0;
855
856 printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
857
858 /* Create a cache for allocation of default size sgls */
859 len = sizeof(struct fnic_dflt_sgl_list);
860 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
861 ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
862 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
863 NULL);
864 if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
865 printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
866 err = -ENOMEM;
867 goto err_create_fnic_sgl_slab_dflt;
868 }
869
870 /* Create a cache for allocation of max size sgls*/
871 len = sizeof(struct fnic_sgl_list);
872 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
873 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
874 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
875 NULL);
876 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
877 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
878 err = -ENOMEM;
879 goto err_create_fnic_sgl_slab_max;
880 }
881
882 /* Create a cache of io_req structs for use via mempool */
883 fnic_io_req_cache = kmem_cache_create("fnic_io_req",
884 sizeof(struct fnic_io_req),
885 0, SLAB_HWCACHE_ALIGN, NULL);
886 if (!fnic_io_req_cache) {
887 printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
888 err = -ENOMEM;
889 goto err_create_fnic_ioreq_slab;
890 }
891
892 fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
893 if (!fnic_event_queue) {
894 printk(KERN_ERR PFX "fnic work queue create failed\n");
895 err = -ENOMEM;
896 goto err_create_fnic_workq;
897 }
898
899 spin_lock_init(&fnic_list_lock);
900 INIT_LIST_HEAD(&fnic_list);
901
902 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
903 if (!fnic_fc_transport) {
904 printk(KERN_ERR PFX "fc_attach_transport error\n");
905 err = -ENOMEM;
906 goto err_fc_transport;
907 }
908
909 /* register the driver with PCI system */
910 err = pci_register_driver(&fnic_driver);
911 if (err < 0) {
912 printk(KERN_ERR PFX "pci register error\n");
913 goto err_pci_register;
914 }
915 return err;
916
917err_pci_register:
918 fc_release_transport(fnic_fc_transport);
919err_fc_transport:
920 destroy_workqueue(fnic_event_queue);
921err_create_fnic_workq:
922 kmem_cache_destroy(fnic_io_req_cache);
923err_create_fnic_ioreq_slab:
924 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
925err_create_fnic_sgl_slab_max:
926 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
927err_create_fnic_sgl_slab_dflt:
928 return err;
929}
930
931static void __exit fnic_cleanup_module(void)
932{
933 pci_unregister_driver(&fnic_driver);
934 destroy_workqueue(fnic_event_queue);
935 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
936 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
937 kmem_cache_destroy(fnic_io_req_cache);
938 fc_release_transport(fnic_fc_transport);
939}
940
941module_init(fnic_init_module);
942module_exit(fnic_cleanup_module);
943
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 00000000000..7ba61ec715d
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "cq_enet_desc.h"
24#include "vnic_resource.h"
25#include "vnic_dev.h"
26#include "vnic_wq.h"
27#include "vnic_rq.h"
28#include "vnic_cq.h"
29#include "vnic_intr.h"
30#include "vnic_stats.h"
31#include "vnic_nic.h"
32#include "fnic.h"
33
34int fnic_get_vnic_config(struct fnic *fnic)
35{
36 struct vnic_fc_config *c = &fnic->config;
37 int err;
38
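/* Read one field of the vNIC FC configuration from the device */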
39#define GET_CONFIG(m) \
40 do { \
41 err = vnic_dev_spec(fnic->vdev, \
42 offsetof(struct vnic_fc_config, m), \
43 sizeof(c->m), &c->m); \
44 if (err) { \
45 shost_printk(KERN_ERR, fnic->lport->host, \
46 "Error getting %s, %d\n", #m, \
47 err); \
48 return err; \
49 } \
50 } while (0);
51
52 GET_CONFIG(node_wwn);
53 GET_CONFIG(port_wwn);
54 GET_CONFIG(wq_enet_desc_count);
55 GET_CONFIG(wq_copy_desc_count);
56 GET_CONFIG(rq_desc_count);
57 GET_CONFIG(maxdatafieldsize);
58 GET_CONFIG(ed_tov);
59 GET_CONFIG(ra_tov);
60 GET_CONFIG(intr_timer);
61 GET_CONFIG(intr_timer_type);
62 GET_CONFIG(flags);
63 GET_CONFIG(flogi_retries);
64 GET_CONFIG(flogi_timeout);
65 GET_CONFIG(plogi_retries);
66 GET_CONFIG(plogi_timeout);
67 GET_CONFIG(io_throttle_count);
68 GET_CONFIG(link_down_timeout);
69 GET_CONFIG(port_down_timeout);
70 GET_CONFIG(port_down_io_retries);
71 GET_CONFIG(luns_per_tgt);
72
73 c->wq_enet_desc_count =
74 min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
75 max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
76 c->wq_enet_desc_count));
77 c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
78
79 c->wq_copy_desc_count =
80 min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
81 max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
82 c->wq_copy_desc_count));
83 c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
84
85 c->rq_desc_count =
86 min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
87 max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
88 c->rq_desc_count));
89 c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
90
91 c->maxdatafieldsize =
92 min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
93 max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
94 c->maxdatafieldsize));
95 c->ed_tov =
96 min_t(u32, VNIC_FNIC_EDTOV_MAX,
97 max_t(u32, VNIC_FNIC_EDTOV_MIN,
98 c->ed_tov));
99
100 c->ra_tov =
101 min_t(u32, VNIC_FNIC_RATOV_MAX,
102 max_t(u32, VNIC_FNIC_RATOV_MIN,
103 c->ra_tov));
104
105 c->flogi_retries =
106 min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
107
108 c->flogi_timeout =
109 min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
110 max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
111 c->flogi_timeout));
112
113 c->plogi_retries =
114 min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
115
116 c->plogi_timeout =
117 min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
118 max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
119 c->plogi_timeout));
120
121 c->io_throttle_count =
122 min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
123 max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
124 c->io_throttle_count));
125
126 c->link_down_timeout =
127 min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
128 c->link_down_timeout);
129
130 c->port_down_timeout =
131 min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
132 c->port_down_timeout);
133
134 c->port_down_io_retries =
135 min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
136 c->port_down_io_retries);
137
138 c->luns_per_tgt =
139 min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
140 max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
141 c->luns_per_tgt));
142
143 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
144 c->intr_timer_type = c->intr_timer_type;
145
146 shost_printk(KERN_INFO, fnic->lport->host,
147 "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
148 "wq/wq_copy/rq %d/%d/%d\n",
149 fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
150 fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
151 c->wq_enet_desc_count, c->wq_copy_desc_count,
152 c->rq_desc_count);
153 shost_printk(KERN_INFO, fnic->lport->host,
154 "vNIC node wwn %llx port wwn %llx\n",
155 c->node_wwn, c->port_wwn);
156 shost_printk(KERN_INFO, fnic->lport->host,
157 "vNIC ed_tov %d ra_tov %d\n",
158 c->ed_tov, c->ra_tov);
159 shost_printk(KERN_INFO, fnic->lport->host,
160 "vNIC mtu %d intr timer %d\n",
161 c->maxdatafieldsize, c->intr_timer);
162 shost_printk(KERN_INFO, fnic->lport->host,
163 "vNIC flags 0x%x luns per tgt %d\n",
164 c->flags, c->luns_per_tgt);
165 shost_printk(KERN_INFO, fnic->lport->host,
166 "vNIC flogi_retries %d flogi timeout %d\n",
167 c->flogi_retries, c->flogi_timeout);
168 shost_printk(KERN_INFO, fnic->lport->host,
169 "vNIC plogi retries %d plogi timeout %d\n",
170 c->plogi_retries, c->plogi_timeout);
171 shost_printk(KERN_INFO, fnic->lport->host,
172 "vNIC io throttle count %d link dn timeout %d\n",
173 c->io_throttle_count, c->link_down_timeout);
174 shost_printk(KERN_INFO, fnic->lport->host,
175 "vNIC port dn io retries %d port dn timeout %d\n",
176 c->port_down_io_retries, c->port_down_timeout);
177
178 return 0;
179}
180
181int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
182 u8 rss_hash_type,
183 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
184 u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
185{
186 u64 a0, a1;
187 u32 nic_cfg;
188 int wait = 1000;
189
190 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
191 rss_hash_type, rss_hash_bits, rss_base_cpu,
192 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
193
194 a0 = nic_cfg;
195 a1 = 0;
196
197 return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
198}
199
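/*
 * Of the WQs reported by the device, the last one is used as the copy
 * (SCSI) WQ; the remaining WQs carry raw FCS frames.
 */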
200void fnic_get_res_counts(struct fnic *fnic)
201{
202 fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
203 fnic->raw_wq_count = fnic->wq_count - 1;
204 fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
205 fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
206 fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
207 fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
208 RES_TYPE_INTR_CTRL);
209}
210
211void fnic_free_vnic_resources(struct fnic *fnic)
212{
213 unsigned int i;
214
215 for (i = 0; i < fnic->raw_wq_count; i++)
216 vnic_wq_free(&fnic->wq[i]);
217
218 for (i = 0; i < fnic->wq_copy_count; i++)
219 vnic_wq_copy_free(&fnic->wq_copy[i]);
220
221 for (i = 0; i < fnic->rq_count; i++)
222 vnic_rq_free(&fnic->rq[i]);
223
224 for (i = 0; i < fnic->cq_count; i++)
225 vnic_cq_free(&fnic->cq[i]);
226
227 for (i = 0; i < fnic->intr_count; i++)
228 vnic_intr_free(&fnic->intr[i]);
229}
230
231int fnic_alloc_vnic_resources(struct fnic *fnic)
232{
233 enum vnic_dev_intr_mode intr_mode;
234 unsigned int mask_on_assertion;
235 unsigned int interrupt_offset;
236 unsigned int error_interrupt_enable;
237 unsigned int error_interrupt_offset;
238 unsigned int i, cq_index;
239 unsigned int wq_copy_cq_desc_count;
240 int err;
241
242 intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
243
244 shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
245 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
246 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
247 intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
248 "MSI-X" : "unknown");
249
250 shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
251 "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
252 fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
253 fnic->rq_count, fnic->cq_count, fnic->intr_count);
254
255 /* Allocate Raw WQ used for FCS frames */
256 for (i = 0; i < fnic->raw_wq_count; i++) {
257 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
258 fnic->config.wq_enet_desc_count,
259 sizeof(struct wq_enet_desc));
260 if (err)
261 goto err_out_cleanup;
262 }
263
264 /* Allocate Copy WQs used for SCSI IOs */
265 for (i = 0; i < fnic->wq_copy_count; i++) {
266 err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
267 (fnic->raw_wq_count + i),
268 fnic->config.wq_copy_desc_count,
269 sizeof(struct fcpio_host_req));
270 if (err)
271 goto err_out_cleanup;
272 }
273
274 /* RQ for receiving FCS frames */
275 for (i = 0; i < fnic->rq_count; i++) {
276 err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
277 fnic->config.rq_desc_count,
278 sizeof(struct rq_enet_desc));
279 if (err)
280 goto err_out_cleanup;
281 }
282
283 /* CQ for each RQ */
284 for (i = 0; i < fnic->rq_count; i++) {
285 cq_index = i;
286 err = vnic_cq_alloc(fnic->vdev,
287 &fnic->cq[cq_index], cq_index,
288 fnic->config.rq_desc_count,
289 sizeof(struct cq_enet_rq_desc));
290 if (err)
291 goto err_out_cleanup;
292 }
293
294 /* CQ for each WQ */
295 for (i = 0; i < fnic->raw_wq_count; i++) {
296 cq_index = fnic->rq_count + i;
297 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
298 fnic->config.wq_enet_desc_count,
299 sizeof(struct cq_enet_wq_desc));
300 if (err)
301 goto err_out_cleanup;
302 }
303
304 /* CQ for each COPY WQ */
305 wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
306 for (i = 0; i < fnic->wq_copy_count; i++) {
307 cq_index = fnic->raw_wq_count + fnic->rq_count + i;
308 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
309 cq_index,
310 wq_copy_cq_desc_count,
311 sizeof(struct fcpio_fw_req));
312 if (err)
313 goto err_out_cleanup;
314 }
315
316 for (i = 0; i < fnic->intr_count; i++) {
317 err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
318 if (err)
319 goto err_out_cleanup;
320 }
321
322 fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
323 RES_TYPE_INTR_PBA_LEGACY, 0);
324
325 if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
326 shost_printk(KERN_ERR, fnic->lport->host,
327 "Failed to hook legacy pba resource\n");
328 err = -ENODEV;
329 goto err_out_cleanup;
330 }
331
332 /*
333 * Init RQ/WQ resources.
334 *
335 * RQ[0 to n-1] point to CQ[0 to n-1]
336 * WQ[0 to m-1] point to CQ[n to n+m-1]
337 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
338 *
339 * Note for copy wq we always initialize with cq_index = 0
340 *
341 * Error interrupt is not enabled for MSI.
342 */
343
344 switch (intr_mode) {
345 case VNIC_DEV_INTR_MODE_INTX:
346 case VNIC_DEV_INTR_MODE_MSIX:
347 error_interrupt_enable = 1;
348 error_interrupt_offset = fnic->err_intr_offset;
349 break;
350 default:
351 error_interrupt_enable = 0;
352 error_interrupt_offset = 0;
353 break;
354 }
355
356 for (i = 0; i < fnic->rq_count; i++) {
357 cq_index = i;
358 vnic_rq_init(&fnic->rq[i],
359 cq_index,
360 error_interrupt_enable,
361 error_interrupt_offset);
362 }
363
364 for (i = 0; i < fnic->raw_wq_count; i++) {
365 cq_index = i + fnic->rq_count;
366 vnic_wq_init(&fnic->wq[i],
367 cq_index,
368 error_interrupt_enable,
369 error_interrupt_offset);
370 }
371
372 for (i = 0; i < fnic->wq_copy_count; i++) {
373 vnic_wq_copy_init(&fnic->wq_copy[i],
374 0 /* cq_index 0 - always */,
375 error_interrupt_enable,
376 error_interrupt_offset);
377 }
378
379 for (i = 0; i < fnic->cq_count; i++) {
380
381 switch (intr_mode) {
382 case VNIC_DEV_INTR_MODE_MSIX:
383 interrupt_offset = i;
384 break;
385 default:
386 interrupt_offset = 0;
387 break;
388 }
389
390 vnic_cq_init(&fnic->cq[i],
391 0 /* flow_control_enable */,
392 1 /* color_enable */,
393 0 /* cq_head */,
394 0 /* cq_tail */,
395 1 /* cq_tail_color */,
396 1 /* interrupt_enable */,
397 1 /* cq_entry_enable */,
398 0 /* cq_message_enable */,
399 interrupt_offset,
400 0 /* cq_message_addr */);
401 }
402
403 /*
404 * Init INTR resources
405 *
406 * mask_on_assertion is not used for INTx due to the level-
407 * triggered nature of INTx
408 */
409
410 switch (intr_mode) {
411 case VNIC_DEV_INTR_MODE_MSI:
412 case VNIC_DEV_INTR_MODE_MSIX:
413 mask_on_assertion = 1;
414 break;
415 default:
416 mask_on_assertion = 0;
417 break;
418 }
419
420 for (i = 0; i < fnic->intr_count; i++) {
421 vnic_intr_init(&fnic->intr[i],
422 fnic->config.intr_timer,
423 fnic->config.intr_timer_type,
424 mask_on_assertion);
425 }
426
427 /* init the stats memory by making the first call here */
428 err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
429 if (err) {
430 shost_printk(KERN_ERR, fnic->lport->host,
431 "vnic_dev_stats_dump failed - x%x\n", err);
432 goto err_out_cleanup;
433 }
434
435 /* Clear LIF stats */
436 vnic_dev_stats_clear(fnic->vdev);
437
438 return 0;
439
440err_out_cleanup:
441 fnic_free_vnic_resources(fnic);
442
443 return err;
444}
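
For a concrete picture of the CQ index layout set up above, here is a minimal stand-alone sketch (not driver code; the 1/1/1 queue counts are assumed example values) that prints the same mapping produced by the cq_index expressions in fnic_alloc_vnic_resources() and relied on again in fnic_wq_copy_cmpl_handler():

#include <stdio.h>

int main(void)
{
	/* assumed example counts: n RQs, m raw WQs, k copy WQs */
	unsigned int rq_count = 1, raw_wq_count = 1, wq_copy_count = 1;
	unsigned int i;

	for (i = 0; i < rq_count; i++)
		printf("RQ[%u]      -> CQ[%u]\n", i, i);
	for (i = 0; i < raw_wq_count; i++)
		printf("WQ[%u]      -> CQ[%u]\n", i, rq_count + i);
	for (i = 0; i < wq_copy_count; i++)
		printf("WQ_COPY[%u] -> CQ[%u]\n", i,
		       rq_count + raw_wq_count + i);
	return 0;
}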
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 00000000000..b6f31026253
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,197 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_RES_H_
19#define _FNIC_RES_H_
20
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "vnic_wq.h"
24#include "vnic_rq.h"
25#include "fnic_io.h"
26#include "fcpio.h"
27#include "vnic_wq_copy.h"
28#include "vnic_cq_copy.h"
29
30static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
31 void *os_buf, dma_addr_t dma_addr,
32 unsigned int len, unsigned int fc_eof,
33 int vlan_tag_insert,
34 unsigned int vlan_tag,
35 int cq_entry, int sop, int eop)
36{
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
38
39 wq_enet_desc_enc(desc,
40 (u64)dma_addr | VNIC_PADDR_TARGET,
41 (u16)len,
42 0, /* mss_or_csum_offset */
43 (u16)fc_eof,
44 0, /* offload_mode */
45 (u8)eop, (u8)cq_entry,
46 1, /* fcoe_encap */
47 (u8)vlan_tag_insert,
48 (u16)vlan_tag,
49 0 /* loopback */);
50
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
52}
53
54static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
55 u32 req_id,
56 u32 lunmap_id, u8 spl_flags,
57 u32 sgl_cnt, u32 sense_len,
58 u64 sgl_addr, u64 sns_addr,
59 u8 crn, u8 pri_ta,
60 u8 flags, u8 *scsi_cdb,
61 u32 data_len, u8 *lun,
62 u32 d_id, u16 mss,
63 u32 ratov, u32 edtov)
64{
65 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
66
67 desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
68 desc->hdr.status = 0; /* header status entry */
69 desc->hdr._resvd = 0; /* reserved */
70 desc->hdr.tag.u.req_id = req_id; /* id for this request */
71
72 desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
73 desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
74 desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
75 desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
76 desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
77 desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
78 desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
79 desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
80 desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
81 desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
82 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
83 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
84 desc->u.icmnd_16.flags = flags; /* command flags */
85 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */
86 desc->u.icmnd_16.data_len = data_len; /* length of data expected */
87 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
88 desc->u.icmnd_16._resvd2 = 0; /* reserved */
89 hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
90 desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
91 desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
92 desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
93
94 vnic_wq_copy_post(wq);
95}
96
97static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
98 u32 req_id, u32 lunmap_id,
99 u32 tm_req, u32 tm_id, u8 *lun,
100 u32 d_id, u32 r_a_tov,
101 u32 e_d_tov)
102{
103 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
104
105 desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
106 desc->hdr.status = 0; /* header status entry */
107 desc->hdr._resvd = 0; /* reserved */
108 desc->hdr.tag.u.req_id = req_id; /* id for this request */
109
110 desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
111 desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
112 desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
113 desc->u.itmf._resvd = 0;
114 memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
115 desc->u.itmf._resvd1 = 0;
116 hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
117 desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
118 desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
119
120 vnic_wq_copy_post(wq);
121}
122
123static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
124 u32 req_id, u8 format,
125 u32 s_id, u8 *gw_mac)
126{
127 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
128
129 desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
130 desc->hdr.status = 0; /* header status entry */
131 desc->hdr._resvd = 0; /* reserved */
132 desc->hdr.tag.u.req_id = req_id; /* id for this request */
133
134 desc->u.flogi_reg.format = format;
135 hton24(desc->u.flogi_reg.s_id, s_id);
136 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
137
138 vnic_wq_copy_post(wq);
139}
140
141static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
142 u32 req_id)
143{
144 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
145
146 desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
147 desc->hdr.status = 0; /* header status entry */
148 desc->hdr._resvd = 0; /* reserved */
149 desc->hdr.tag.u.req_id = req_id; /* id for this request */
150
151 vnic_wq_copy_post(wq);
152}
153
154static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
155 u32 req_id, u64 lunmap_addr,
156 u32 lunmap_len)
157{
158 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
159
160 desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
161 desc->hdr.status = 0; /* header status entry */
162 desc->hdr._resvd = 0; /* reserved */
163 desc->hdr.tag.u.req_id = req_id; /* id for this request */
164
165 desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
166 desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
167
168 vnic_wq_copy_post(wq);
169}
170
171static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
172 void *os_buf, dma_addr_t dma_addr,
173 u16 len)
174{
175 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
176
177 rq_enet_desc_enc(desc,
178 (u64)dma_addr | VNIC_PADDR_TARGET,
179 RQ_ENET_TYPE_ONLY_SOP,
180 (u16)len);
181
182 vnic_rq_post(rq, os_buf, 0, dma_addr, len);
183}
184
185
186struct fnic;
187
188int fnic_get_vnic_config(struct fnic *);
189int fnic_alloc_vnic_resources(struct fnic *);
190void fnic_free_vnic_resources(struct fnic *);
191void fnic_get_res_counts(struct fnic *);
192int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
193 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
194 u8 rss_enable, u8 tso_ipid_split_en,
195 u8 ig_vlan_strip_en);
196
197#endif /* _FNIC_RES_H_ */
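
The hton24() calls above store a 24-bit FC address (S_ID/D_ID) into a three-byte field, most significant byte first; hton24 itself comes from the libfc/FC headers, not this file. A minimal stand-alone illustration of that byte layout, using hypothetical helper names (put_fc_id24/get_fc_id24 are not driver functions):

#include <stdio.h>

/* store a 24-bit FC ID big-endian into three bytes, and read it back */
static void put_fc_id24(unsigned char *p, unsigned int id)
{
	p[0] = (id >> 16) & 0xff;
	p[1] = (id >> 8) & 0xff;
	p[2] = id & 0xff;
}

static unsigned int get_fc_id24(const unsigned char *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
	unsigned char d_id[3];

	put_fc_id24(d_id, 0xFFFFFC);	/* well-known directory server address */
	printf("bytes %02x %02x %02x -> 0x%06x\n",
	       d_id[0], d_id[1], d_id[2], get_fc_id24(d_id));
	return 0;
}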
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 00000000000..eabf3650285
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,1850 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/mempool.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/workqueue.h>
22#include <linux/pci.h>
23#include <linux/scatterlist.h>
24#include <linux/skbuff.h>
25#include <linux/spinlock.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/delay.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_tcq.h>
34#include <scsi/fc/fc_els.h>
35#include <scsi/fc/fc_fcoe.h>
36#include <scsi/libfc.h>
37#include <scsi/fc_frame.h>
38#include "fnic_io.h"
39#include "fnic.h"
40
41const char *fnic_state_str[] = {
42 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
43 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
44 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
45 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
46};
47
48static const char *fnic_ioreq_state_str[] = {
49 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
50 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
51 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
52 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
53};
54
55static const char *fcpio_status_str[] = {
56 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
57 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
58 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
 59	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
60 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
61 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
62 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
63 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
64 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
65 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
66 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
67 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
68 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
69 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
70 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
71 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
72 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
73 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
 74	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
75};
76
77const char *fnic_state_to_str(unsigned int state)
78{
79 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
80 return "unknown";
81
82 return fnic_state_str[state];
83}
84
85static const char *fnic_ioreq_state_to_str(unsigned int state)
86{
87 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
88 !fnic_ioreq_state_str[state])
89 return "unknown";
90
91 return fnic_ioreq_state_str[state];
92}
93
94static const char *fnic_fcpio_status_to_str(unsigned int status)
95{
96 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
97 return "unknown";
98
99 return fcpio_status_str[status];
100}
101
102static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
103
104static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
105 struct scsi_cmnd *sc)
106{
107 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
108
109 return &fnic->io_req_lock[hash];
110}
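
fnic_io_lock_hash() above spreads per-command locking over an array of spinlocks by masking the SCSI tag with FNIC_IO_LOCKS - 1, which behaves like a modulo only when FNIC_IO_LOCKS is a power of two (its real value is defined in fnic.h and not shown here). A minimal sketch, assuming an example value of 64:

#include <stdio.h>

#define EXAMPLE_IO_LOCKS 64	/* assumed power-of-two value, for illustration */

int main(void)
{
	unsigned int tag;

	/* tag & (N - 1) equals tag % N only when N is a power of two */
	for (tag = 60; tag < 68; tag++)
		printf("tag %u -> lock %u\n", tag, tag & (EXAMPLE_IO_LOCKS - 1));
	return 0;
}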
111
112/*
113 * Unmap the data buffer and sense buffer for an io_req,
114 * also unmap and free the device-private scatter/gather list.
115 */
116static void fnic_release_ioreq_buf(struct fnic *fnic,
117 struct fnic_io_req *io_req,
118 struct scsi_cmnd *sc)
119{
120 if (io_req->sgl_list_pa)
121 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
122 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
123 PCI_DMA_TODEVICE);
124 scsi_dma_unmap(sc);
125
126 if (io_req->sgl_cnt)
127 mempool_free(io_req->sgl_list_alloc,
128 fnic->io_sgl_pool[io_req->sgl_type]);
129 if (io_req->sense_buf_pa)
130 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
131 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
132}
133
134/* Free up Copy Wq descriptors. Called with copy_wq lock held */
135static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
136{
137 /* if no Ack received from firmware, then nothing to clean */
138 if (!fnic->fw_ack_recd[0])
139 return 1;
140
141 /*
142 * Update desc_available count based on number of freed descriptors
143 * Account for wraparound
144 */
145 if (wq->to_clean_index <= fnic->fw_ack_index[0])
146 wq->ring.desc_avail += (fnic->fw_ack_index[0]
147 - wq->to_clean_index + 1);
148 else
149 wq->ring.desc_avail += (wq->ring.desc_count
150 - wq->to_clean_index
151 + fnic->fw_ack_index[0] + 1);
152
153 /*
 154	 * Just bump the clean index to ack_index+1, accounting for wraparound;
 155	 * this will essentially free up all descriptors between
156 * to_clean_index and fw_ack_index, both inclusive
157 */
158 wq->to_clean_index =
159 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
160
161 /* we have processed the acks received so far */
162 fnic->fw_ack_recd[0] = 0;
163 return 0;
164}
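
A stand-alone sketch of the reclaim arithmetic in free_wq_copy_descs() above (the 64-entry ring is an assumed example size): given the ring size, the current to_clean_index and the last firmware-acked index, it computes how many descriptors become available, both endpoints inclusive, with and without wraparound:

#include <stdio.h>

static unsigned int descs_freed(unsigned int desc_count,
				unsigned int to_clean, unsigned int fw_ack)
{
	if (to_clean <= fw_ack)
		return fw_ack - to_clean + 1;
	/* wrapped: to_clean..end of ring, then 0..fw_ack */
	return desc_count - to_clean + fw_ack + 1;
}

int main(void)
{
	/* no wrap: cleaning 10..14 on a 64-entry ring frees 5 descriptors */
	printf("%u\n", descs_freed(64, 10, 14));
	/* wrap: cleaning 62, 63, 0, 1 frees 4 descriptors */
	printf("%u\n", descs_freed(64, 62, 1));
	return 0;
}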
165
166
167/*
168 * fnic_fw_reset_handler
169 * Routine to send reset msg to fw
170 */
171int fnic_fw_reset_handler(struct fnic *fnic)
172{
173 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
174 int ret = 0;
175 unsigned long flags;
176
177 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
178
179 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
180 free_wq_copy_descs(fnic, wq);
181
182 if (!vnic_wq_copy_desc_avail(wq))
183 ret = -EAGAIN;
184 else
185 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
186
187 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
188
189 if (!ret)
190 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
191 "Issued fw reset\n");
192 else
193 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
194 "Failed to issue fw reset\n");
195 return ret;
196}
197
198
199/*
200 * fnic_flogi_reg_handler
201 * Routine to send flogi register msg to fw
202 */
203int fnic_flogi_reg_handler(struct fnic *fnic)
204{
205 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
206 u8 gw_mac[ETH_ALEN];
207 int ret = 0;
208 unsigned long flags;
209
210 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
211
212 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
213 free_wq_copy_descs(fnic, wq);
214
215 if (!vnic_wq_copy_desc_avail(wq)) {
216 ret = -EAGAIN;
217 goto flogi_reg_ioreq_end;
218 }
219
220 if (fnic->fcoui_mode)
221 memset(gw_mac, 0xff, ETH_ALEN);
222 else
223 memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
224
225 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
226 FCPIO_FLOGI_REG_GW_DEST,
227 fnic->s_id,
228 gw_mac);
229
230flogi_reg_ioreq_end:
231 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
232
233 if (!ret)
234 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 235			      "flogi reg issued\n");
236
237 return ret;
238}
239
240/*
241 * fnic_queue_wq_copy_desc
242 * Routine to enqueue a wq copy desc
243 */
244static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
245 struct vnic_wq_copy *wq,
246 struct fnic_io_req *io_req,
247 struct scsi_cmnd *sc,
248 u32 sg_count)
249{
250 struct scatterlist *sg;
251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
252 struct fc_rport_libfc_priv *rp = rport->dd_data;
253 struct host_sg_desc *desc;
254 u8 pri_tag = 0;
255 unsigned int i;
256 unsigned long intr_flags;
257 int flags;
258 u8 exch_flags;
259 struct scsi_lun fc_lun;
260 char msg[2];
261
262 if (sg_count) {
263 BUG_ON(sg_count < 0);
264 BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
265
266 /* For each SGE, create a device desc entry */
267 desc = io_req->sgl_list;
268 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
269 desc->addr = cpu_to_le64(sg_dma_address(sg));
270 desc->len = cpu_to_le32(sg_dma_len(sg));
271 desc->_resvd = 0;
272 desc++;
273 }
274
275 io_req->sgl_list_pa = pci_map_single
276 (fnic->pdev,
277 io_req->sgl_list,
278 sizeof(io_req->sgl_list[0]) * sg_count,
279 PCI_DMA_TODEVICE);
280 }
281
282 io_req->sense_buf_pa = pci_map_single(fnic->pdev,
283 sc->sense_buffer,
284 SCSI_SENSE_BUFFERSIZE,
285 PCI_DMA_FROMDEVICE);
286
287 int_to_scsilun(sc->device->lun, &fc_lun);
288
289 pri_tag = FCPIO_ICMND_PTA_SIMPLE;
290 msg[0] = MSG_SIMPLE_TAG;
291 scsi_populate_tag_msg(sc, msg);
292 if (msg[0] == MSG_ORDERED_TAG)
293 pri_tag = FCPIO_ICMND_PTA_ORDERED;
294
295 /* Enqueue the descriptor in the Copy WQ */
296 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
297
298 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
299 free_wq_copy_descs(fnic, wq);
300
301 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
302 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
303 return SCSI_MLQUEUE_HOST_BUSY;
304 }
305
306 flags = 0;
307 if (sc->sc_data_direction == DMA_FROM_DEVICE)
308 flags = FCPIO_ICMND_RDDATA;
309 else if (sc->sc_data_direction == DMA_TO_DEVICE)
310 flags = FCPIO_ICMND_WRDATA;
311
312 exch_flags = 0;
313 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
314 (rp->flags & FC_RP_FLAGS_RETRY))
315 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
316
317 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
318 0, exch_flags, io_req->sgl_cnt,
319 SCSI_SENSE_BUFFERSIZE,
320 io_req->sgl_list_pa,
321 io_req->sense_buf_pa,
322 0, /* scsi cmd ref, always 0 */
323 pri_tag, /* scsi pri and tag */
324 flags, /* command flags */
325 sc->cmnd, scsi_bufflen(sc),
326 fc_lun.scsi_lun, io_req->port_id,
327 rport->maxframe_size, rp->r_a_tov,
328 rp->e_d_tov);
329
330 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
331 return 0;
332}
333
334/*
335 * fnic_queuecommand
336 * Routine to send a scsi cdb
337 * Called with host_lock held and interrupts disabled.
338 */
339int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
340{
341 struct fc_lport *lp;
342 struct fc_rport *rport;
343 struct fnic_io_req *io_req;
344 struct fnic *fnic;
345 struct vnic_wq_copy *wq;
346 int ret;
 347	int sg_count;	/* scsi_dma_map() can return a negative error code */
348 unsigned long flags;
349 unsigned long ptr;
350
351 rport = starget_to_rport(scsi_target(sc->device));
352 ret = fc_remote_port_chkready(rport);
353 if (ret) {
354 sc->result = ret;
355 done(sc);
356 return 0;
357 }
358
359 lp = shost_priv(sc->device->host);
360 if (lp->state != LPORT_ST_READY || !(lp->link_up))
361 return SCSI_MLQUEUE_HOST_BUSY;
362
363 /*
364 * Release host lock, use driver resource specific locks from here.
365 * Don't re-enable interrupts in case they were disabled prior to the
366 * caller disabling them.
367 */
368 spin_unlock(lp->host->host_lock);
369
370 /* Get a new io_req for this SCSI IO */
371 fnic = lport_priv(lp);
372
373 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
374 if (!io_req) {
375 ret = SCSI_MLQUEUE_HOST_BUSY;
376 goto out;
377 }
378 memset(io_req, 0, sizeof(*io_req));
379
380 /* Map the data buffer */
381 sg_count = scsi_dma_map(sc);
382 if (sg_count < 0) {
383 mempool_free(io_req, fnic->io_req_pool);
384 goto out;
385 }
386
387 /* Determine the type of scatter/gather list we need */
388 io_req->sgl_cnt = sg_count;
389 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
390 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
391 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
392
393 if (sg_count) {
394 io_req->sgl_list =
395 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
396 GFP_ATOMIC | GFP_DMA);
397 if (!io_req->sgl_list) {
398 ret = SCSI_MLQUEUE_HOST_BUSY;
399 scsi_dma_unmap(sc);
400 mempool_free(io_req, fnic->io_req_pool);
401 goto out;
402 }
403
404 /* Cache sgl list allocated address before alignment */
405 io_req->sgl_list_alloc = io_req->sgl_list;
406 ptr = (unsigned long) io_req->sgl_list;
407 if (ptr % FNIC_SG_DESC_ALIGN) {
408 io_req->sgl_list = (struct host_sg_desc *)
409 (((unsigned long) ptr
410 + FNIC_SG_DESC_ALIGN - 1)
411 & ~(FNIC_SG_DESC_ALIGN - 1));
412 }
413 }
414
415 /* initialize rest of io_req */
416 io_req->port_id = rport->port_id;
417 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
418 CMD_SP(sc) = (char *)io_req;
419 sc->scsi_done = done;
420
421 /* create copy wq desc and enqueue it */
422 wq = &fnic->wq_copy[0];
423 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
424 if (ret) {
425 /*
426 * In case another thread cancelled the request,
427 * refetch the pointer under the lock.
428 */
429 spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
430
431 spin_lock_irqsave(io_lock, flags);
432 io_req = (struct fnic_io_req *)CMD_SP(sc);
433 CMD_SP(sc) = NULL;
434 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
435 spin_unlock_irqrestore(io_lock, flags);
436 if (io_req) {
437 fnic_release_ioreq_buf(fnic, io_req, sc);
438 mempool_free(io_req, fnic->io_req_pool);
439 }
440 }
441out:
442 /* acquire host lock before returning to SCSI */
443 spin_lock(lp->host->host_lock);
444 return ret;
445}
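
The sgl_list alignment step in fnic_queuecommand() above uses the usual round-up-to-a-power-of-two idiom, (ptr + align - 1) & ~(align - 1). A minimal stand-alone sketch (16 is an assumed example alignment; the driver uses FNIC_SG_DESC_ALIGN, whose value is defined elsewhere):

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_ALIGN 16	/* assumed power-of-two alignment, for illustration */

/* round a pointer value up to the next multiple of align */
static uintptr_t align_up(uintptr_t ptr, uintptr_t align)
{
	return (ptr + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("0x%lx\n", (unsigned long)align_up(0x1001, EXAMPLE_ALIGN)); /* 0x1010 */
	printf("0x%lx\n", (unsigned long)align_up(0x1010, EXAMPLE_ALIGN)); /* 0x1010 */
	return 0;
}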
446
447/*
448 * fnic_fcpio_fw_reset_cmpl_handler
449 * Routine to handle fw reset completion
450 */
451static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
452 struct fcpio_fw_req *desc)
453{
454 u8 type;
455 u8 hdr_status;
456 struct fcpio_tag tag;
457 int ret = 0;
458 struct fc_frame *flogi;
459 unsigned long flags;
460
461 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
462
463 /* Clean up all outstanding io requests */
464 fnic_cleanup_io(fnic, SCSI_NO_TAG);
465
466 spin_lock_irqsave(&fnic->fnic_lock, flags);
467
468 flogi = fnic->flogi;
469 fnic->flogi = NULL;
470
471 /* fnic should be in FC_TRANS_ETH_MODE */
472 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
473 /* Check status of reset completion */
474 if (!hdr_status) {
475 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
476 "reset cmpl success\n");
477 /* Ready to send flogi out */
478 fnic->state = FNIC_IN_ETH_MODE;
479 } else {
480 FNIC_SCSI_DBG(KERN_DEBUG,
481 fnic->lport->host,
482 "fnic fw_reset : failed %s\n",
483 fnic_fcpio_status_to_str(hdr_status));
484
485 /*
486 * Unable to change to eth mode, cannot send out flogi
487 * Change state to fc mode, so that subsequent Flogi
488 * requests from libFC will cause more attempts to
489 * reset the firmware. Free the cached flogi
490 */
491 fnic->state = FNIC_IN_FC_MODE;
492 ret = -1;
493 }
494 } else {
495 FNIC_SCSI_DBG(KERN_DEBUG,
496 fnic->lport->host,
497 "Unexpected state %s while processing"
498 " reset cmpl\n", fnic_state_to_str(fnic->state));
499 ret = -1;
500 }
501
502 /* Thread removing device blocks till firmware reset is complete */
503 if (fnic->remove_wait)
504 complete(fnic->remove_wait);
505
 506	 * If the fnic is being removed or the fw reset failed,
 507	 * free the flogi frame. Else, send it out
508 * free the flogi frame. Else, send it out
509 */
510 if (fnic->remove_wait || ret) {
511 fnic->flogi_oxid = FC_XID_UNKNOWN;
512 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
513 if (flogi)
514 dev_kfree_skb_irq(fp_skb(flogi));
515 goto reset_cmpl_handler_end;
516 }
517
518 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
519
520 if (flogi)
521 ret = fnic_send_frame(fnic, flogi);
522
523 reset_cmpl_handler_end:
524 return ret;
525}
526
527/*
528 * fnic_fcpio_flogi_reg_cmpl_handler
529 * Routine to handle flogi register completion
530 */
531static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
532 struct fcpio_fw_req *desc)
533{
534 u8 type;
535 u8 hdr_status;
536 struct fcpio_tag tag;
537 int ret = 0;
538 struct fc_frame *flogi_resp = NULL;
539 unsigned long flags;
540 struct sk_buff *skb;
541
542 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
543
544 /* Update fnic state based on status of flogi reg completion */
545 spin_lock_irqsave(&fnic->fnic_lock, flags);
546
547 flogi_resp = fnic->flogi_resp;
548 fnic->flogi_resp = NULL;
549
550 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
551
552 /* Check flogi registration completion status */
553 if (!hdr_status) {
554 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 555				      "flogi reg succeeded\n");
556 fnic->state = FNIC_IN_FC_MODE;
557 } else {
558 FNIC_SCSI_DBG(KERN_DEBUG,
559 fnic->lport->host,
560 "fnic flogi reg :failed %s\n",
561 fnic_fcpio_status_to_str(hdr_status));
562 fnic->state = FNIC_IN_ETH_MODE;
563 ret = -1;
564 }
565 } else {
566 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
567 "Unexpected fnic state %s while"
568 " processing flogi reg completion\n",
569 fnic_state_to_str(fnic->state));
570 ret = -1;
571 }
572
573 /* Successful flogi reg cmpl, pass frame to LibFC */
574 if (!ret && flogi_resp) {
575 if (fnic->stop_rx_link_events) {
576 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
577 goto reg_cmpl_handler_end;
578 }
579 skb = (struct sk_buff *)flogi_resp;
580 /* Use fr_flags to indicate whether flogi resp or not */
581 fr_flags(flogi_resp) = 1;
582 fr_dev(flogi_resp) = fnic->lport;
583 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
584
585 skb_queue_tail(&fnic->frame_queue, skb);
586 queue_work(fnic_event_queue, &fnic->frame_work);
587
588 } else {
589 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
590 if (flogi_resp)
591 dev_kfree_skb_irq(fp_skb(flogi_resp));
592 }
593
594reg_cmpl_handler_end:
595 return ret;
596}
597
598static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
599 u16 request_out)
600{
601 if (wq->to_clean_index <= wq->to_use_index) {
602 /* out of range, stale request_out index */
603 if (request_out < wq->to_clean_index ||
604 request_out >= wq->to_use_index)
605 return 0;
606 } else {
607 /* out of range, stale request_out index */
608 if (request_out < wq->to_clean_index &&
609 request_out >= wq->to_use_index)
610 return 0;
611 }
612 /* request_out index is in range */
613 return 1;
614}
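
is_ack_index_in_range() above rejects stale request_out values by checking that they fall inside the ring window [to_clean_index, to_use_index), taking wraparound into account. A minimal stand-alone sketch of the same check with a few example indices:

#include <stdio.h>

static int ack_in_range(unsigned int to_clean, unsigned int to_use,
			unsigned int request_out)
{
	if (to_clean <= to_use)
		return request_out >= to_clean && request_out < to_use;
	/* window wraps past the end of the ring */
	return request_out >= to_clean || request_out < to_use;
}

int main(void)
{
	printf("%d\n", ack_in_range(4, 10, 7));		/* 1: inside, no wrap */
	printf("%d\n", ack_in_range(60, 3, 62));	/* 1: inside, wrapped */
	printf("%d\n", ack_in_range(60, 3, 10));	/* 0: stale index */
	return 0;
}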
615
616
617/*
 618 * Mark that an ack was received and store the ack index. If multiple
 619 * acks arrive before the Tx thread cleans up, only the latest value is
 620 * used, which is the correct behavior. This state should live in the
 621 * copy WQ instead of in the fnic
622 */
623static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
624 unsigned int cq_index,
625 struct fcpio_fw_req *desc)
626{
627 struct vnic_wq_copy *wq;
628 u16 request_out = desc->u.ack.request_out;
629 unsigned long flags;
630
631 /* mark the ack state */
632 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
633 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
634
635 if (is_ack_index_in_range(wq, request_out)) {
636 fnic->fw_ack_index[0] = request_out;
637 fnic->fw_ack_recd[0] = 1;
638 }
639 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
640}
641
642/*
643 * fnic_fcpio_icmnd_cmpl_handler
644 * Routine to handle icmnd completions
645 */
646static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
647 struct fcpio_fw_req *desc)
648{
649 u8 type;
650 u8 hdr_status;
651 struct fcpio_tag tag;
652 u32 id;
653 u64 xfer_len = 0;
654 struct fcpio_icmnd_cmpl *icmnd_cmpl;
655 struct fnic_io_req *io_req;
656 struct scsi_cmnd *sc;
657 unsigned long flags;
658 spinlock_t *io_lock;
659
660 /* Decode the cmpl description to get the io_req id */
661 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
662 fcpio_tag_id_dec(&tag, &id);
663
664 if (id >= FNIC_MAX_IO_REQ)
665 return;
666
667 sc = scsi_host_find_tag(fnic->lport->host, id);
668 WARN_ON_ONCE(!sc);
669 if (!sc)
670 return;
671
672 io_lock = fnic_io_lock_hash(fnic, sc);
673 spin_lock_irqsave(io_lock, flags);
674 io_req = (struct fnic_io_req *)CMD_SP(sc);
675 WARN_ON_ONCE(!io_req);
676 if (!io_req) {
677 spin_unlock_irqrestore(io_lock, flags);
678 return;
679 }
680
681 /* firmware completed the io */
682 io_req->io_completed = 1;
683
684 /*
685 * if SCSI-ML has already issued abort on this command,
686 * ignore completion of the IO. The abts path will clean it up
687 */
688 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
689 spin_unlock_irqrestore(io_lock, flags);
690 return;
691 }
692
693 /* Mark the IO as complete */
694 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
695
696 icmnd_cmpl = &desc->u.icmnd_cmpl;
697
698 switch (hdr_status) {
699 case FCPIO_SUCCESS:
700 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
701 xfer_len = scsi_bufflen(sc);
702 scsi_set_resid(sc, icmnd_cmpl->residual);
703
704 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
705 xfer_len -= icmnd_cmpl->residual;
706
707 /*
708 * If queue_full, then try to reduce queue depth for all
709 * LUNS on the target. Todo: this should be accompanied
710 * by a periodic queue_depth rampup based on successful
711 * IO completion.
712 */
713 if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
714 struct scsi_device *t_sdev;
715 int qd = 0;
716
717 shost_for_each_device(t_sdev, sc->device->host) {
718 if (t_sdev->id != sc->device->id)
719 continue;
720
721 if (t_sdev->queue_depth > 1) {
722 qd = scsi_track_queue_full
723 (t_sdev,
724 t_sdev->queue_depth - 1);
725 if (qd == -1)
726 qd = t_sdev->host->cmd_per_lun;
727 shost_printk(KERN_INFO,
728 fnic->lport->host,
729 "scsi[%d:%d:%d:%d"
730 "] queue full detected,"
 731					 " new depth = %d\n",
732 t_sdev->host->host_no,
733 t_sdev->channel,
734 t_sdev->id, t_sdev->lun,
735 t_sdev->queue_depth);
736 }
737 }
738 }
739 break;
740
741 case FCPIO_TIMEOUT: /* request was timed out */
742 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
743 break;
744
745 case FCPIO_ABORTED: /* request was aborted */
746 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
747 break;
748
749 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
750 scsi_set_resid(sc, icmnd_cmpl->residual);
751 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
752 break;
753
754 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
755 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
756 break;
757 case FCPIO_INVALID_HEADER: /* header contains invalid data */
758 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
759 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
760 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
761 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
762 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
763 case FCPIO_FW_ERR: /* request was terminated due fw error */
764 default:
765 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
766 fnic_fcpio_status_to_str(hdr_status));
767 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
768 break;
769 }
770
771 /* Break link with the SCSI command */
772 CMD_SP(sc) = NULL;
773
774 spin_unlock_irqrestore(io_lock, flags);
775
776 fnic_release_ioreq_buf(fnic, io_req, sc);
777
778 mempool_free(io_req, fnic->io_req_pool);
779
780 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
781 fnic->lport->host_stats.fcp_input_requests++;
782 fnic->fcp_input_bytes += xfer_len;
783 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
784 fnic->lport->host_stats.fcp_output_requests++;
785 fnic->fcp_output_bytes += xfer_len;
786 } else
787 fnic->lport->host_stats.fcp_control_requests++;
788
789 /* Call SCSI completion function to complete the IO */
790 if (sc->scsi_done)
791 sc->scsi_done(sc);
792
793}
794
795/* fnic_fcpio_itmf_cmpl_handler
796 * Routine to handle itmf completions
797 */
798static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
799 struct fcpio_fw_req *desc)
800{
801 u8 type;
802 u8 hdr_status;
803 struct fcpio_tag tag;
804 u32 id;
805 struct scsi_cmnd *sc;
806 struct fnic_io_req *io_req;
807 unsigned long flags;
808 spinlock_t *io_lock;
809
810 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
811 fcpio_tag_id_dec(&tag, &id);
812
813 if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
814 return;
815
816 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
817 WARN_ON_ONCE(!sc);
818 if (!sc)
819 return;
820
821 io_lock = fnic_io_lock_hash(fnic, sc);
822 spin_lock_irqsave(io_lock, flags);
823 io_req = (struct fnic_io_req *)CMD_SP(sc);
824 WARN_ON_ONCE(!io_req);
825 if (!io_req) {
826 spin_unlock_irqrestore(io_lock, flags);
827 return;
828 }
829
830 if (id & FNIC_TAG_ABORT) {
831 /* Completion of abort cmd */
832 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
833 /* This is a late completion. Ignore it */
834 spin_unlock_irqrestore(io_lock, flags);
835 return;
836 }
837 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
838 CMD_ABTS_STATUS(sc) = hdr_status;
839
840 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
841 "abts cmpl recd. id %d status %s\n",
842 (int)(id & FNIC_TAG_MASK),
843 fnic_fcpio_status_to_str(hdr_status));
844
845 /*
846 * If scsi_eh thread is blocked waiting for abts to complete,
 847		 * signal completion to it. The IO will be cleaned up in that thread;
 848		 * else, clean it up in this context
849 */
850 if (io_req->abts_done) {
851 complete(io_req->abts_done);
852 spin_unlock_irqrestore(io_lock, flags);
853 } else {
854 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
855 "abts cmpl, completing IO\n");
856 CMD_SP(sc) = NULL;
857 sc->result = (DID_ERROR << 16);
858
859 spin_unlock_irqrestore(io_lock, flags);
860
861 fnic_release_ioreq_buf(fnic, io_req, sc);
862 mempool_free(io_req, fnic->io_req_pool);
863 if (sc->scsi_done)
864 sc->scsi_done(sc);
865 }
866
867 } else if (id & FNIC_TAG_DEV_RST) {
868 /* Completion of device reset */
869 CMD_LR_STATUS(sc) = hdr_status;
870 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
871 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
872 "dev reset cmpl recd. id %d status %s\n",
873 (int)(id & FNIC_TAG_MASK),
874 fnic_fcpio_status_to_str(hdr_status));
875 if (io_req->dr_done)
876 complete(io_req->dr_done);
877 spin_unlock_irqrestore(io_lock, flags);
878
879 } else {
880 shost_printk(KERN_ERR, fnic->lport->host,
881 "Unexpected itmf io state %s tag %x\n",
882 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
883 spin_unlock_irqrestore(io_lock, flags);
884 }
885
886}
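
The ITMF completion path above recovers the SCSI tag with id & FNIC_TAG_MASK and tells aborts apart from device resets via the FNIC_TAG_ABORT and FNIC_TAG_DEV_RST flag bits OR'd in by the submit paths. The real bit values live in the driver headers and are not shown here; the sketch below uses assumed placeholder values purely to illustrate the encode/decode:

#include <stdio.h>

#define EX_TAG_MASK	0x00ffffffu	/* assumed: low bits carry the SCSI tag */
#define EX_TAG_ABORT	0x40000000u	/* assumed flag bit */
#define EX_TAG_DEV_RST	0x20000000u	/* assumed flag bit */

int main(void)
{
	unsigned int id = 5 | EX_TAG_ABORT;	/* as an abort submit path would build it */

	printf("scsi tag   = %u\n", id & EX_TAG_MASK);
	printf("is abort   = %d\n", (id & EX_TAG_ABORT) != 0);
	printf("is dev rst = %d\n", (id & EX_TAG_DEV_RST) != 0);
	return 0;
}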
887
888/*
889 * fnic_fcpio_cmpl_handler
890 * Routine to service the cq for wq_copy
891 */
892static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
893 unsigned int cq_index,
894 struct fcpio_fw_req *desc)
895{
896 struct fnic *fnic = vnic_dev_priv(vdev);
897 int ret = 0;
898
899 switch (desc->hdr.type) {
900 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
901 fnic_fcpio_ack_handler(fnic, cq_index, desc);
902 break;
903
904 case FCPIO_ICMND_CMPL: /* fw completed a command */
905 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
906 break;
907
908 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
909 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
910 break;
911
912 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
913 ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
914 break;
915
916 case FCPIO_RESET_CMPL: /* fw completed reset */
917 ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
918 break;
919
920 default:
921 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
922 "firmware completion type %d\n",
923 desc->hdr.type);
924 break;
925 }
926
927 return ret;
928}
929
930/*
931 * fnic_wq_copy_cmpl_handler
932 * Routine to process wq copy
933 */
934int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
935{
936 unsigned int wq_work_done = 0;
937 unsigned int i, cq_index;
938 unsigned int cur_work_done;
939
940 for (i = 0; i < fnic->wq_copy_count; i++) {
941 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
942 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
943 fnic_fcpio_cmpl_handler,
944 copy_work_to_do);
945 wq_work_done += cur_work_done;
946 }
947 return wq_work_done;
948}
949
950static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
951{
952 unsigned int i;
953 struct fnic_io_req *io_req;
954 unsigned long flags = 0;
955 struct scsi_cmnd *sc;
956 spinlock_t *io_lock;
957
958 for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
959 if (i == exclude_id)
960 continue;
961
962 sc = scsi_host_find_tag(fnic->lport->host, i);
963 if (!sc)
964 continue;
965
966 io_lock = fnic_io_lock_hash(fnic, sc);
967 spin_lock_irqsave(io_lock, flags);
968 io_req = (struct fnic_io_req *)CMD_SP(sc);
969 if (!io_req) {
970 spin_unlock_irqrestore(io_lock, flags);
971 goto cleanup_scsi_cmd;
972 }
973
974 CMD_SP(sc) = NULL;
975
976 spin_unlock_irqrestore(io_lock, flags);
977
978 /*
979 * If there is a scsi_cmnd associated with this io_req, then
980 * free the corresponding state
981 */
982 fnic_release_ioreq_buf(fnic, io_req, sc);
983 mempool_free(io_req, fnic->io_req_pool);
984
985cleanup_scsi_cmd:
986 sc->result = DID_TRANSPORT_DISRUPTED << 16;
987 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
988 " DID_TRANSPORT_DISRUPTED\n");
989
990 /* Complete the command to SCSI */
991 if (sc->scsi_done)
992 sc->scsi_done(sc);
993 }
994}
995
996void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
997 struct fcpio_host_req *desc)
998{
999 u32 id;
1000 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1001 struct fnic_io_req *io_req;
1002 struct scsi_cmnd *sc;
1003 unsigned long flags;
1004 spinlock_t *io_lock;
1005
1006 /* get the tag reference */
1007 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1008 id &= FNIC_TAG_MASK;
1009
1010 if (id >= FNIC_MAX_IO_REQ)
1011 return;
1012
1013 sc = scsi_host_find_tag(fnic->lport->host, id);
1014 if (!sc)
1015 return;
1016
1017 io_lock = fnic_io_lock_hash(fnic, sc);
1018 spin_lock_irqsave(io_lock, flags);
1019
1020 /* Get the IO context which this desc refers to */
1021 io_req = (struct fnic_io_req *)CMD_SP(sc);
1022
1023 /* fnic interrupts are turned off by now */
1024
1025 if (!io_req) {
1026 spin_unlock_irqrestore(io_lock, flags);
1027 goto wq_copy_cleanup_scsi_cmd;
1028 }
1029
1030 CMD_SP(sc) = NULL;
1031
1032 spin_unlock_irqrestore(io_lock, flags);
1033
1034 fnic_release_ioreq_buf(fnic, io_req, sc);
1035 mempool_free(io_req, fnic->io_req_pool);
1036
1037wq_copy_cleanup_scsi_cmd:
1038 sc->result = DID_NO_CONNECT << 16;
1039 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1040 " DID_NO_CONNECT\n");
1041
1042 if (sc->scsi_done)
1043 sc->scsi_done(sc);
1044}
1045
1046static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1047 u32 task_req, u8 *fc_lun,
1048 struct fnic_io_req *io_req)
1049{
1050 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1051 unsigned long flags;
1052
1053 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1054
1055 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1056 free_wq_copy_descs(fnic, wq);
1057
1058 if (!vnic_wq_copy_desc_avail(wq)) {
1059 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1060 return 1;
1061 }
1062 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1063 0, task_req, tag, fc_lun, io_req->port_id,
1064 fnic->config.ra_tov, fnic->config.ed_tov);
1065
1066 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1067 return 0;
1068}
1069
1070void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1071{
1072 int tag;
1073 struct fnic_io_req *io_req;
1074 spinlock_t *io_lock;
1075 unsigned long flags;
1076 struct scsi_cmnd *sc;
1077 struct scsi_lun fc_lun;
1078 enum fnic_ioreq_state old_ioreq_state;
1079
1080 FNIC_SCSI_DBG(KERN_DEBUG,
1081 fnic->lport->host,
 1082		      "fnic_rport_exch_reset called portid 0x%06x\n",
1083 port_id);
1084
1085 if (fnic->in_remove)
1086 return;
1087
1088 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1089 sc = scsi_host_find_tag(fnic->lport->host, tag);
1090 if (!sc)
1091 continue;
1092
1093 io_lock = fnic_io_lock_hash(fnic, sc);
1094 spin_lock_irqsave(io_lock, flags);
1095
1096 io_req = (struct fnic_io_req *)CMD_SP(sc);
1097
1098 if (!io_req || io_req->port_id != port_id) {
1099 spin_unlock_irqrestore(io_lock, flags);
1100 continue;
1101 }
1102
1103 /*
1104 * Found IO that is still pending with firmware and
1105 * belongs to rport that went away
1106 */
1107 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1108 spin_unlock_irqrestore(io_lock, flags);
1109 continue;
1110 }
1111 old_ioreq_state = CMD_STATE(sc);
1112 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1113 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1114
1115 BUG_ON(io_req->abts_done);
1116
1117 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 1118			      "fnic_rport_exch_reset: Issuing abts\n");
1119
1120 spin_unlock_irqrestore(io_lock, flags);
1121
1122 /* Now queue the abort command to firmware */
1123 int_to_scsilun(sc->device->lun, &fc_lun);
1124
1125 if (fnic_queue_abort_io_req(fnic, tag,
1126 FCPIO_ITMF_ABT_TASK_TERM,
1127 fc_lun.scsi_lun, io_req)) {
1128 /*
1129 * Revert the cmd state back to old state, if
 1130			 * it hasn't changed in between. This cmd will get
1131 * aborted later by scsi_eh, or cleaned up during
1132 * lun reset
1133 */
1134 io_lock = fnic_io_lock_hash(fnic, sc);
1135
1136 spin_lock_irqsave(io_lock, flags);
1137 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1138 CMD_STATE(sc) = old_ioreq_state;
1139 spin_unlock_irqrestore(io_lock, flags);
1140 }
1141 }
1142
1143}
1144
1145void fnic_terminate_rport_io(struct fc_rport *rport)
1146{
1147 int tag;
1148 struct fnic_io_req *io_req;
1149 spinlock_t *io_lock;
1150 unsigned long flags;
1151 struct scsi_cmnd *sc;
1152 struct scsi_lun fc_lun;
1153 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1154 struct fc_lport *lport = rdata->local_port;
1155 struct fnic *fnic = lport_priv(lport);
1156 struct fc_rport *cmd_rport;
1157 enum fnic_ioreq_state old_ioreq_state;
1158
1159 FNIC_SCSI_DBG(KERN_DEBUG,
1160 fnic->lport->host, "fnic_terminate_rport_io called"
 1161		      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
1162 rport->port_name, rport->node_name,
1163 rport->port_id);
1164
1165 if (fnic->in_remove)
1166 return;
1167
1168 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1169 sc = scsi_host_find_tag(fnic->lport->host, tag);
1170 if (!sc)
1171 continue;
1172
1173 cmd_rport = starget_to_rport(scsi_target(sc->device));
1174 if (rport != cmd_rport)
1175 continue;
1176
1177 io_lock = fnic_io_lock_hash(fnic, sc);
1178 spin_lock_irqsave(io_lock, flags);
1179
1180 io_req = (struct fnic_io_req *)CMD_SP(sc);
1181
1182 if (!io_req || rport != cmd_rport) {
1183 spin_unlock_irqrestore(io_lock, flags);
1184 continue;
1185 }
1186
1187 /*
1188 * Found IO that is still pending with firmware and
1189 * belongs to rport that went away
1190 */
1191 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1192 spin_unlock_irqrestore(io_lock, flags);
1193 continue;
1194 }
1195 old_ioreq_state = CMD_STATE(sc);
1196 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1197 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1198
1199 BUG_ON(io_req->abts_done);
1200
1201 FNIC_SCSI_DBG(KERN_DEBUG,
1202 fnic->lport->host,
1203 "fnic_terminate_rport_io: Issuing abts\n");
1204
1205 spin_unlock_irqrestore(io_lock, flags);
1206
1207 /* Now queue the abort command to firmware */
1208 int_to_scsilun(sc->device->lun, &fc_lun);
1209
1210 if (fnic_queue_abort_io_req(fnic, tag,
1211 FCPIO_ITMF_ABT_TASK_TERM,
1212 fc_lun.scsi_lun, io_req)) {
1213 /*
1214 * Revert the cmd state back to old state, if
 1215			 * it hasn't changed in between. This cmd will get
1216 * aborted later by scsi_eh, or cleaned up during
1217 * lun reset
1218 */
1219 io_lock = fnic_io_lock_hash(fnic, sc);
1220
1221 spin_lock_irqsave(io_lock, flags);
1222 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1223 CMD_STATE(sc) = old_ioreq_state;
1224 spin_unlock_irqrestore(io_lock, flags);
1225 }
1226 }
1227
1228}
1229
1230static void fnic_block_error_handler(struct scsi_cmnd *sc)
1231{
1232 struct Scsi_Host *shost = sc->device->host;
1233 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(shost->host_lock, flags);
1237 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1238 spin_unlock_irqrestore(shost->host_lock, flags);
1239 msleep(1000);
1240 spin_lock_irqsave(shost->host_lock, flags);
1241 }
1242 spin_unlock_irqrestore(shost->host_lock, flags);
1243
1244}
1245
1246/*
1247 * This function is exported to SCSI for sending abort cmnds.
1248 * A SCSI IO is represented by a io_req in the driver.
1249 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1250 */
1251int fnic_abort_cmd(struct scsi_cmnd *sc)
1252{
1253 struct fc_lport *lp;
1254 struct fnic *fnic;
1255 struct fnic_io_req *io_req;
1256 struct fc_rport *rport;
1257 spinlock_t *io_lock;
1258 unsigned long flags;
1259 int ret = SUCCESS;
1260 u32 task_req;
1261 struct scsi_lun fc_lun;
1262 DECLARE_COMPLETION_ONSTACK(tm_done);
1263
1264 /* Wait for rport to unblock */
1265 fnic_block_error_handler(sc);
1266
1267 /* Get local-port, check ready and link up */
1268 lp = shost_priv(sc->device->host);
1269
1270 fnic = lport_priv(lp);
1271 FNIC_SCSI_DBG(KERN_DEBUG,
1272 fnic->lport->host,
1273 "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
1274 (starget_to_rport(scsi_target(sc->device)))->port_id,
1275 sc->device->lun, sc->request->tag);
1276
1277 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1278 ret = FAILED;
1279 goto fnic_abort_cmd_end;
1280 }
1281
1282 /*
1283 * Avoid a race between SCSI issuing the abort and the device
1284 * completing the command.
1285 *
1286 * If the command is already completed by the fw cmpl code,
1287 * we just return SUCCESS from here. This means that the abort
 1288	 * succeeded. In the SCSI ML, since the timeout for the command has
 1289	 * happened, the completion won't actually complete the command,
 1290	 * and it will be treated as an aborted command.
1291 *
1292 * The CMD_SP will not be cleared except while holding io_req_lock.
1293 */
1294 io_lock = fnic_io_lock_hash(fnic, sc);
1295 spin_lock_irqsave(io_lock, flags);
1296 io_req = (struct fnic_io_req *)CMD_SP(sc);
1297 if (!io_req) {
1298 spin_unlock_irqrestore(io_lock, flags);
1299 goto fnic_abort_cmd_end;
1300 }
1301
1302 io_req->abts_done = &tm_done;
1303
1304 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1305 spin_unlock_irqrestore(io_lock, flags);
1306 goto wait_pending;
1307 }
1308 /*
 1309	 * Command is still pending; we need to abort it.
 1310	 * If the firmware completes the command after this point,
 1311	 * the completion won't be reported to the mid-layer, since the
 1312	 * abort has already started.
1313 */
1314 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1315 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1316
1317 spin_unlock_irqrestore(io_lock, flags);
1318
1319 /*
1320 * Check readiness of the remote port. If the path to remote
1321 * port is up, then send abts to the remote port to terminate
1322 * the IO. Else, just locally terminate the IO in the firmware
1323 */
1324 rport = starget_to_rport(scsi_target(sc->device));
1325 if (fc_remote_port_chkready(rport) == 0)
1326 task_req = FCPIO_ITMF_ABT_TASK;
1327 else
1328 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1329
1330 /* Now queue the abort command to firmware */
1331 int_to_scsilun(sc->device->lun, &fc_lun);
1332
1333 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1334 fc_lun.scsi_lun, io_req)) {
1335 spin_lock_irqsave(io_lock, flags);
1336 io_req = (struct fnic_io_req *)CMD_SP(sc);
1337 if (io_req)
1338 io_req->abts_done = NULL;
1339 spin_unlock_irqrestore(io_lock, flags);
1340 ret = FAILED;
1341 goto fnic_abort_cmd_end;
1342 }
1343
1344 /*
1345 * We queued an abort IO, wait for its completion.
1346 * Once the firmware completes the abort command, it will
1347 * wake up this thread.
1348 */
1349 wait_pending:
1350 wait_for_completion_timeout(&tm_done,
1351 msecs_to_jiffies
1352 (2 * fnic->config.ra_tov +
1353 fnic->config.ed_tov));
1354
1355 /* Check the abort status */
1356 spin_lock_irqsave(io_lock, flags);
1357
1358 io_req = (struct fnic_io_req *)CMD_SP(sc);
1359 if (!io_req) {
1360 spin_unlock_irqrestore(io_lock, flags);
1361 ret = FAILED;
1362 goto fnic_abort_cmd_end;
1363 }
1364 io_req->abts_done = NULL;
1365
1366 /* fw did not complete abort, timed out */
1367 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1368 spin_unlock_irqrestore(io_lock, flags);
1369 ret = FAILED;
1370 goto fnic_abort_cmd_end;
1371 }
1372
1373 /*
1374 * firmware completed the abort, check the status,
1375 * free the io_req irrespective of failure or success
1376 */
1377 if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
1378 ret = FAILED;
1379
1380 CMD_SP(sc) = NULL;
1381
1382 spin_unlock_irqrestore(io_lock, flags);
1383
1384 fnic_release_ioreq_buf(fnic, io_req, sc);
1385 mempool_free(io_req, fnic->io_req_pool);
1386
1387fnic_abort_cmd_end:
1388 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1389 "Returning from abort cmd %s\n",
1390 (ret == SUCCESS) ?
1391 "SUCCESS" : "FAILED");
1392 return ret;
1393}
1394
1395static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1396 struct scsi_cmnd *sc,
1397 struct fnic_io_req *io_req)
1398{
1399 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1400 struct scsi_lun fc_lun;
1401 int ret = 0;
1402 unsigned long intr_flags;
1403
1404 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1405
1406 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1407 free_wq_copy_descs(fnic, wq);
1408
1409 if (!vnic_wq_copy_desc_avail(wq)) {
1410 ret = -EAGAIN;
1411 goto lr_io_req_end;
1412 }
1413
1414 /* fill in the lun info */
1415 int_to_scsilun(sc->device->lun, &fc_lun);
1416
1417 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
1418 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1419 fc_lun.scsi_lun, io_req->port_id,
1420 fnic->config.ra_tov, fnic->config.ed_tov);
1421
1422lr_io_req_end:
1423 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1424
1425 return ret;
1426}
1427
1428/*
1429 * Clean up any pending aborts on the lun
1430 * For each outstanding IO on this lun, whose abort is not completed by fw,
1431 * issue a local abort. Wait for abort to complete. Return 0 if all commands
1432 * successfully aborted, 1 otherwise
1433 */
1434static int fnic_clean_pending_aborts(struct fnic *fnic,
1435 struct scsi_cmnd *lr_sc)
1436{
1437 int tag;
1438 struct fnic_io_req *io_req;
1439 spinlock_t *io_lock;
1440 unsigned long flags;
1441 int ret = 0;
1442 struct scsi_cmnd *sc;
1443 struct fc_rport *rport;
1444 struct scsi_lun fc_lun;
1445 struct scsi_device *lun_dev = lr_sc->device;
1446 DECLARE_COMPLETION_ONSTACK(tm_done);
1447
1448 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1449 sc = scsi_host_find_tag(fnic->lport->host, tag);
1450 /*
1451 * ignore this lun reset cmd or cmds that do not belong to
1452 * this lun
1453 */
1454 if (!sc || sc == lr_sc || sc->device != lun_dev)
1455 continue;
1456
1457 io_lock = fnic_io_lock_hash(fnic, sc);
1458 spin_lock_irqsave(io_lock, flags);
1459
1460 io_req = (struct fnic_io_req *)CMD_SP(sc);
1461
1462 if (!io_req || sc->device != lun_dev) {
1463 spin_unlock_irqrestore(io_lock, flags);
1464 continue;
1465 }
1466
1467 /*
1468 * Found IO that is still pending with firmware and
1469 * belongs to the LUN that we are resetting
1470 */
1471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1472 "Found IO in %s on lun\n",
1473 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1474
1475 BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
1476
1477 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1478 io_req->abts_done = &tm_done;
1479 spin_unlock_irqrestore(io_lock, flags);
1480
1481 /* Now queue the abort command to firmware */
1482 int_to_scsilun(sc->device->lun, &fc_lun);
1483 rport = starget_to_rport(scsi_target(sc->device));
1484
1485 if (fnic_queue_abort_io_req(fnic, tag,
1486 FCPIO_ITMF_ABT_TASK_TERM,
1487 fc_lun.scsi_lun, io_req)) {
1488 spin_lock_irqsave(io_lock, flags);
1489 io_req = (struct fnic_io_req *)CMD_SP(sc);
1490 if (io_req)
1491 io_req->abts_done = NULL;
1492 spin_unlock_irqrestore(io_lock, flags);
1493 ret = 1;
1494 goto clean_pending_aborts_end;
1495 }
1496
1497 wait_for_completion_timeout(&tm_done,
1498 msecs_to_jiffies
1499 (fnic->config.ed_tov));
1500
1501 /* Recheck cmd state to check if it is now aborted */
1502 spin_lock_irqsave(io_lock, flags);
1503 io_req = (struct fnic_io_req *)CMD_SP(sc);
1504 if (!io_req) {
1505 spin_unlock_irqrestore(io_lock, flags);
1506 ret = 1;
1507 goto clean_pending_aborts_end;
1508 }
1509
1510 io_req->abts_done = NULL;
1511
1512 /* if abort is still pending with fw, fail */
1513 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1514 spin_unlock_irqrestore(io_lock, flags);
1515 ret = 1;
1516 goto clean_pending_aborts_end;
1517 }
1518 CMD_SP(sc) = NULL;
1519 spin_unlock_irqrestore(io_lock, flags);
1520
1521 fnic_release_ioreq_buf(fnic, io_req, sc);
1522 mempool_free(io_req, fnic->io_req_pool);
1523 }
1524
1525clean_pending_aborts_end:
1526 return ret;
1527}
1528
1529/*
1530 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
1531 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
1532 * on the LUN.
1533 */
1534int fnic_device_reset(struct scsi_cmnd *sc)
1535{
1536 struct fc_lport *lp;
1537 struct fnic *fnic;
1538 struct fnic_io_req *io_req;
1539 struct fc_rport *rport;
1540 int status;
1541 int ret = FAILED;
1542 spinlock_t *io_lock;
1543 unsigned long flags;
1544 DECLARE_COMPLETION_ONSTACK(tm_done);
1545
1546 /* Wait for rport to unblock */
1547 fnic_block_error_handler(sc);
1548
1549 /* Get local-port, check ready and link up */
1550 lp = shost_priv(sc->device->host);
1551
1552 fnic = lport_priv(lp);
1553 FNIC_SCSI_DBG(KERN_DEBUG,
1554 fnic->lport->host,
1555 "Device reset called FCID 0x%x, LUN 0x%x\n",
1556 (starget_to_rport(scsi_target(sc->device)))->port_id,
1557 sc->device->lun);
1558
1559
1560 if (lp->state != LPORT_ST_READY || !(lp->link_up))
1561 goto fnic_device_reset_end;
1562
1563 /* Check if remote port up */
1564 rport = starget_to_rport(scsi_target(sc->device));
1565 if (fc_remote_port_chkready(rport))
1566 goto fnic_device_reset_end;
1567
1568 io_lock = fnic_io_lock_hash(fnic, sc);
1569 spin_lock_irqsave(io_lock, flags);
1570 io_req = (struct fnic_io_req *)CMD_SP(sc);
1571
1572 /*
1573 * If there is a io_req attached to this command, then use it,
1574 * else allocate a new one.
1575 */
1576 if (!io_req) {
1577 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
1578 if (!io_req) {
1579 spin_unlock_irqrestore(io_lock, flags);
1580 goto fnic_device_reset_end;
1581 }
1582 memset(io_req, 0, sizeof(*io_req));
1583 io_req->port_id = rport->port_id;
1584 CMD_SP(sc) = (char *)io_req;
1585 }
1586 io_req->dr_done = &tm_done;
1587 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
1588 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
1589 spin_unlock_irqrestore(io_lock, flags);
1590
1591 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
1592 sc->request->tag);
1593
1594 /*
1595 * issue the device reset, if enqueue failed, clean up the ioreq
1596 * and break assoc with scsi cmd
1597 */
1598 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
1599 spin_lock_irqsave(io_lock, flags);
1600 io_req = (struct fnic_io_req *)CMD_SP(sc);
1601 if (io_req)
1602 io_req->dr_done = NULL;
1603 goto fnic_device_reset_clean;
1604 }
1605
1606 /*
1607 * Wait on the local completion for LUN reset. The io_req may be
1608 * freed while we wait since we hold no lock.
1609 */
1610 wait_for_completion_timeout(&tm_done,
1611 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
1612
1613 spin_lock_irqsave(io_lock, flags);
1614 io_req = (struct fnic_io_req *)CMD_SP(sc);
1615 if (!io_req) {
1616 spin_unlock_irqrestore(io_lock, flags);
1617 goto fnic_device_reset_end;
1618 }
1619 io_req->dr_done = NULL;
1620
1621 status = CMD_LR_STATUS(sc);
1622 spin_unlock_irqrestore(io_lock, flags);
1623
1624 /*
1625 * If the LUN reset did not complete, bail out with FAILED. The io_req
1626 * gets cleaned up at higher levels of EH.
1627 */
1628 if (status == FCPIO_INVALID_CODE) {
1629 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1630 "Device reset timed out\n");
1631 goto fnic_device_reset_end;
1632 }
1633
1634 /* Completed, but not successful, clean up the io_req, return fail */
1635 if (status != FCPIO_SUCCESS) {
1636 spin_lock_irqsave(io_lock, flags);
1637 FNIC_SCSI_DBG(KERN_DEBUG,
1638 fnic->lport->host,
1639 "Device reset completed - failed\n");
1640 io_req = (struct fnic_io_req *)CMD_SP(sc);
1641 goto fnic_device_reset_clean;
1642 }
1643
1644 /*
1645 * Clean up any aborts on this LUN that have still not
1646 * completed. If any of these fail, then the LUN reset fails.
1647 * clean_pending_aborts cleans all cmds on this LUN except
1648 * the LUN reset cmd. If all cmds get cleaned, the LUN reset
1649 * succeeds.
1650 */
1651 if (fnic_clean_pending_aborts(fnic, sc)) {
1652 spin_lock_irqsave(io_lock, flags);
1653 io_req = (struct fnic_io_req *)CMD_SP(sc);
1654 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1655 "Device reset failed"
1656 " since could not abort all IOs\n");
1657 goto fnic_device_reset_clean;
1658 }
1659
1660 /* Clean lun reset command */
1661 spin_lock_irqsave(io_lock, flags);
1662 io_req = (struct fnic_io_req *)CMD_SP(sc);
1663 if (io_req)
1664 /* Completed, and successful */
1665 ret = SUCCESS;
1666
1667fnic_device_reset_clean:
1668 if (io_req)
1669 CMD_SP(sc) = NULL;
1670
1671 spin_unlock_irqrestore(io_lock, flags);
1672
1673 if (io_req) {
1674 fnic_release_ioreq_buf(fnic, io_req, sc);
1675 mempool_free(io_req, fnic->io_req_pool);
1676 }
1677
1678fnic_device_reset_end:
1679 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1680 "Returning from device reset %s\n",
1681 (ret == SUCCESS) ?
1682 "SUCCESS" : "FAILED");
1683 return ret;
1684}
1685
1686/* Clean up all IOs, clean up libFC local port */
1687int fnic_reset(struct Scsi_Host *shost)
1688{
1689 struct fc_lport *lp;
1690 struct fnic *fnic;
1691 int ret = SUCCESS;
1692
1693 lp = shost_priv(shost);
1694 fnic = lport_priv(lp);
1695
1696 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1697 "fnic_reset called\n");
1698
1699 /*
1700 * Reset local port, this will clean up libFC exchanges,
1701 * reset remote port sessions, and if link is up, begin flogi
1702 */
1703 if (lp->tt.lport_reset(lp))
1704 ret = FAILED;
1705
1706 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1707 "Returning from fnic reset %s\n",
1708 (ret == SUCCESS) ?
1709 "SUCCESS" : "FAILED");
1710
1711 return ret;
1712}
1713
1714/*
1715 * SCSI error handling calls the driver's eh_host_reset if all prior
1716 * error handling levels return FAILED. If host reset completes
1717 * successfully, and the link is up, fabric login begins.
1718 *
1719 * Host reset is the highest level of error recovery. If this fails, the
1720 * host is taken offline by the SCSI midlayer.
1721 *
1722 */
1723int fnic_host_reset(struct scsi_cmnd *sc)
1724{
1725 int ret;
1726 unsigned long wait_host_tmo;
1727 struct Scsi_Host *shost = sc->device->host;
1728 struct fc_lport *lp = shost_priv(shost);
1729
1730 /*
1731 * If fnic_reset is successful, wait for fabric login to complete.
1732 * scsi-ml sends a TUR to every device if host reset is
1733 * successful, so the fabric should be up before returning to SCSI.
1734 */
1735 ret = fnic_reset(shost);
1736 if (ret == SUCCESS) {
1737 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
1738 ret = FAILED;
1739 while (time_before(jiffies, wait_host_tmo)) {
1740 if ((lp->state == LPORT_ST_READY) &&
1741 (lp->link_up)) {
1742 ret = SUCCESS;
1743 break;
1744 }
1745 ssleep(1);
1746 }
1747 }
1748
1749 return ret;
1750}
1751
1752/*
1753 * This function is called from libFC when the host is being removed.
1754 */
1755void fnic_scsi_abort_io(struct fc_lport *lp)
1756{
1757 int err = 0;
1758 unsigned long flags;
1759 enum fnic_state old_state;
1760 struct fnic *fnic = lport_priv(lp);
1761 DECLARE_COMPLETION_ONSTACK(remove_wait);
1762
1763 /* Issue firmware reset for fnic, wait for reset to complete */
1764 spin_lock_irqsave(&fnic->fnic_lock, flags);
1765 fnic->remove_wait = &remove_wait;
1766 old_state = fnic->state;
1767 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1768 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1769 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1770
1771 err = fnic_fw_reset_handler(fnic);
1772 if (err) {
1773 spin_lock_irqsave(&fnic->fnic_lock, flags);
1774 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1775 fnic->state = old_state;
1776 fnic->remove_wait = NULL;
1777 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1778 return;
1779 }
1780
1781 /* Wait for firmware reset to complete */
1782 wait_for_completion_timeout(&remove_wait,
1783 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
1784
1785 spin_lock_irqsave(&fnic->fnic_lock, flags);
1786 fnic->remove_wait = NULL;
1787 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1788 "fnic_scsi_abort_io %s\n",
1789 (fnic->state == FNIC_IN_ETH_MODE) ?
1790 "SUCCESS" : "FAILED");
1791 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1792
1793}
1794
1795/*
1796 * This function is called from libFC to clean up driver IO state on link down.
1797 */
1798void fnic_scsi_cleanup(struct fc_lport *lp)
1799{
1800 unsigned long flags;
1801 enum fnic_state old_state;
1802 struct fnic *fnic = lport_priv(lp);
1803
1804 /* issue fw reset */
1805 spin_lock_irqsave(&fnic->fnic_lock, flags);
1806 old_state = fnic->state;
1807 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1808 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1809 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1810
1811 if (fnic_fw_reset_handler(fnic)) {
1812 spin_lock_irqsave(&fnic->fnic_lock, flags);
1813 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1814 fnic->state = old_state;
1815 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1816 }
1817
1818}
1819
1820void fnic_empty_scsi_cleanup(struct fc_lport *lp)
1821{
1822}
1823
1824void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1825{
1826 struct fnic *fnic = lport_priv(lp);
1827
1828 /* Non-zero sid, nothing to do */
1829 if (sid)
1830 goto call_fc_exch_mgr_reset;
1831
1832 if (did) {
1833 fnic_rport_exch_reset(fnic, did);
1834 goto call_fc_exch_mgr_reset;
1835 }
1836
1837 /*
1838 * sid = 0, did = 0
1839 * link down or device being removed
1840 */
1841 if (!fnic->in_remove)
1842 fnic_scsi_cleanup(lp);
1843 else
1844 fnic_scsi_abort_io(lp);
1845
1846 /* call libFC exch mgr reset to reset its exchanges */
1847call_fc_exch_mgr_reset:
1848 fc_exch_mgr_reset(lp, sid, did);
1849
1850}
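
The error-handling entry points above (fnic_device_reset, fnic_reset/fnic_host_reset) are invoked by the SCSI midlayer's EH escalation. A minimal sketch of how such handlers are typically wired into a scsi_host_template is shown below; it is illustrative only and not part of this patch (the driver's real template lives in fnic_main.c), and it assumes the queuecommand/abort handlers defined earlier in this file.

/* Illustrative only: wiring the EH entry points into a host template. */
static struct scsi_host_template example_fnic_sht = {
	.module                  = THIS_MODULE,
	.name                    = "fnic",
	.queuecommand            = fnic_queuecommand,   /* earlier in this file */
	.eh_abort_handler        = fnic_abort_cmd,      /* per-command abort */
	.eh_device_reset_handler = fnic_device_reset,   /* LUN reset, above */
	.eh_host_reset_handler   = fnic_host_reset,     /* full reset + re-login */
	.this_id                 = -1,
};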
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 00000000000..92e80ae6b72
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _RQ_ENET_DESC_H_
19#define _RQ_ENET_DESC_H_
20
21/* Ethernet receive queue descriptor: 16B */
22struct rq_enet_desc {
23 __le64 address;
24 __le16 length_type;
25 u8 reserved[6];
26};
27
28enum rq_enet_type_types {
29 RQ_ENET_TYPE_ONLY_SOP = 0,
30 RQ_ENET_TYPE_NOT_SOP = 1,
31 RQ_ENET_TYPE_RESV2 = 2,
32 RQ_ENET_TYPE_RESV3 = 3,
33};
34
35#define RQ_ENET_ADDR_BITS 64
36#define RQ_ENET_LEN_BITS 14
37#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
38#define RQ_ENET_TYPE_BITS 2
39#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
40
41static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
42 u64 address, u8 type, u16 length)
43{
44 desc->address = cpu_to_le64(address);
45 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
46 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
47}
48
49static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
50 u64 *address, u8 *type, u16 *length)
51{
52 *address = le64_to_cpu(desc->address);
53 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
54 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
55 RQ_ENET_TYPE_MASK);
56}
57
58#endif /* _RQ_ENET_DESC_H_ */
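
For reference, a short sketch of how the encode/decode helpers above pack a buffer address and length into the 16-byte descriptor. The function name and buffer size are illustrative, not from the patch.

/* Illustrative usage of rq_enet_desc_enc()/rq_enet_desc_dec(). */
static void example_fill_rq_desc(struct rq_enet_desc *desc, dma_addr_t buf_pa)
{
	u64 addr;
	u16 len;
	u8 type;

	/* post a 2048-byte receive buffer as a single (SOP-only) descriptor */
	rq_enet_desc_enc(desc, (u64)buf_pa, RQ_ENET_TYPE_ONLY_SOP, 2048);

	/* decoding returns the same fields; length is masked to 14 bits */
	rq_enet_desc_dec(desc, &addr, &type, &len);
}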
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 00000000000..c5db32eda5e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "vnic_dev.h"
22#include "vnic_cq.h"
23
24void vnic_cq_free(struct vnic_cq *cq)
25{
26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
27
28 cq->ctrl = NULL;
29}
30
31int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
32 unsigned int desc_count, unsigned int desc_size)
33{
34 int err;
35
36 cq->index = index;
37 cq->vdev = vdev;
38
39 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
40 if (!cq->ctrl) {
41 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
42 return -EINVAL;
43 }
44
45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
46 if (err)
47 return err;
48
49 return 0;
50}
51
52void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
53 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
54 unsigned int cq_tail_color, unsigned int interrupt_enable,
55 unsigned int cq_entry_enable, unsigned int cq_message_enable,
56 unsigned int interrupt_offset, u64 cq_message_addr)
57{
58 u64 paddr;
59
60 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
61 writeq(paddr, &cq->ctrl->ring_base);
62 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
63 iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
64 iowrite32(color_enable, &cq->ctrl->color_enable);
65 iowrite32(cq_head, &cq->ctrl->cq_head);
66 iowrite32(cq_tail, &cq->ctrl->cq_tail);
67 iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
68 iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
69 iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
70 iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
71 iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
72 writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
73}
74
75void vnic_cq_clean(struct vnic_cq *cq)
76{
77 cq->to_clean = 0;
78 cq->last_color = 0;
79
80 iowrite32(0, &cq->ctrl->cq_head);
81 iowrite32(0, &cq->ctrl->cq_tail);
82 iowrite32(1, &cq->ctrl->cq_tail_color);
83
84 vnic_dev_clear_desc_ring(&cq->ring);
85}
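
A typical bring-up first allocates the descriptor ring and then programs the CQ control registers. The sketch below shows the sequence; the parameter values are illustrative defaults, not taken from this patch.

/* Illustrative CQ bring-up using the functions above. */
static int example_cq_setup(struct vnic_dev *vdev, struct vnic_cq *cq,
			    unsigned int index, unsigned int desc_count,
			    unsigned int desc_size)
{
	int err = vnic_cq_alloc(vdev, cq, index, desc_count, desc_size);
	if (err)
		return err;

	vnic_cq_init(cq,
		     0,		/* flow_control_enable */
		     1,		/* color_enable */
		     0, 0,	/* cq_head, cq_tail */
		     1,		/* cq_tail_color */
		     1,		/* interrupt_enable */
		     1,		/* cq_entry_enable */
		     0,		/* cq_message_enable */
		     index,	/* interrupt_offset */
		     0);	/* cq_message_addr */
	return 0;
}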
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 00000000000..4ede6809fb1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_H_
19#define _VNIC_CQ_H_
20
21#include "cq_desc.h"
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_cq_service fnic_cq_service
29#define vnic_cq_free fnic_cq_free
30#define vnic_cq_alloc fnic_cq_alloc
31#define vnic_cq_init fnic_cq_init
32#define vnic_cq_clean fnic_cq_clean
33
34/* Completion queue control */
35struct vnic_cq_ctrl {
36 u64 ring_base; /* 0x00 */
37 u32 ring_size; /* 0x08 */
38 u32 pad0;
39 u32 flow_control_enable; /* 0x10 */
40 u32 pad1;
41 u32 color_enable; /* 0x18 */
42 u32 pad2;
43 u32 cq_head; /* 0x20 */
44 u32 pad3;
45 u32 cq_tail; /* 0x28 */
46 u32 pad4;
47 u32 cq_tail_color; /* 0x30 */
48 u32 pad5;
49 u32 interrupt_enable; /* 0x38 */
50 u32 pad6;
51 u32 cq_entry_enable; /* 0x40 */
52 u32 pad7;
53 u32 cq_message_enable; /* 0x48 */
54 u32 pad8;
55 u32 interrupt_offset; /* 0x50 */
56 u32 pad9;
57 u64 cq_message_addr; /* 0x58 */
58 u32 pad10;
59};
60
61struct vnic_cq {
62 unsigned int index;
63 struct vnic_dev *vdev;
64 struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
65 struct vnic_dev_ring ring;
66 unsigned int to_clean;
67 unsigned int last_color;
68};
69
70static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
71 unsigned int work_to_do,
72 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
73 u8 type, u16 q_number, u16 completed_index, void *opaque),
74 void *opaque)
75{
76 struct cq_desc *cq_desc;
77 unsigned int work_done = 0;
78 u16 q_number, completed_index;
79 u8 type, color;
80
81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
82 cq->ring.desc_size * cq->to_clean);
83 cq_desc_dec(cq_desc, &type, &color,
84 &q_number, &completed_index);
85
86 while (color != cq->last_color) {
87
88 if ((*q_service)(cq->vdev, cq_desc, type,
89 q_number, completed_index, opaque))
90 break;
91
92 cq->to_clean++;
93 if (cq->to_clean == cq->ring.desc_count) {
94 cq->to_clean = 0;
95 cq->last_color = cq->last_color ? 0 : 1;
96 }
97
98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
99 cq->ring.desc_size * cq->to_clean);
100 cq_desc_dec(cq_desc, &type, &color,
101 &q_number, &completed_index);
102
103 work_done++;
104 if (work_done >= work_to_do)
105 break;
106 }
107
108 return work_done;
109}
110
111void vnic_cq_free(struct vnic_cq *cq);
112int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
113 unsigned int desc_count, unsigned int desc_size);
114void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
115 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
116 unsigned int cq_tail_color, unsigned int interrupt_enable,
117 unsigned int cq_entry_enable, unsigned int message_enable,
118 unsigned int interrupt_offset, u64 message_addr);
119void vnic_cq_clean(struct vnic_cq *cq);
120
121#endif /* _VNIC_CQ_H_ */
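
vnic_cq_service() walks descriptors until the color bit stops matching and hands each completed entry to a caller-supplied callback. A sketch of a matching callback and call site follows; the callback body and names are illustrative.

/* Illustrative q_service callback matching the prototype above. */
static int example_cq_handler(struct vnic_dev *vdev, struct cq_desc *cq_desc,
			      u8 type, u16 q_number, u16 completed_index,
			      void *opaque)
{
	/* process the completion for queue q_number up to completed_index;
	 * return non-zero to stop servicing early */
	return 0;
}

/* service at most 'budget' completions, e.g. from an ISR or poll loop */
static unsigned int example_service(struct vnic_cq *cq, unsigned int budget)
{
	return vnic_cq_service(cq, budget, example_cq_handler, NULL);
}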
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 00000000000..7901ce255a8
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_COPY_H_
19#define _VNIC_CQ_COPY_H_
20
21#include "fcpio.h"
22
23static inline unsigned int vnic_cq_copy_service(
24 struct vnic_cq *cq,
25 int (*q_service)(struct vnic_dev *vdev,
26 unsigned int index,
27 struct fcpio_fw_req *desc),
28 unsigned int work_to_do)
29
30{
31 struct fcpio_fw_req *desc;
32 unsigned int work_done = 0;
33 u8 color;
34
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
36 cq->ring.desc_size * cq->to_clean);
37 fcpio_color_dec(desc, &color);
38
39 while (color != cq->last_color) {
40
41 if ((*q_service)(cq->vdev, cq->index, desc))
42 break;
43
44 cq->to_clean++;
45 if (cq->to_clean == cq->ring.desc_count) {
46 cq->to_clean = 0;
47 cq->last_color = cq->last_color ? 0 : 1;
48 }
49
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
51 cq->ring.desc_size * cq->to_clean);
52 fcpio_color_dec(desc, &color);
53
54 work_done++;
55 if (work_done >= work_to_do)
56 break;
57 }
58
59 return work_done;
60}
61
62#endif /* _VNIC_CQ_COPY_H_ */
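
The copy-queue variant above hands raw firmware request descriptors (struct fcpio_fw_req, see fcpio.h) to the callback instead of generic cq_desc entries. An illustrative callback and call site, with hypothetical names:

/* Illustrative callback for vnic_cq_copy_service(). */
static int example_fcpio_cmpl_handler(struct vnic_dev *vdev,
				      unsigned int cq_index,
				      struct fcpio_fw_req *desc)
{
	/* decode the firmware request descriptor here;
	 * return non-zero to stop servicing early */
	return 0;
}

static unsigned int example_copy_service(struct vnic_cq *cq, unsigned int budget)
{
	return vnic_cq_copy_service(cq, example_fcpio_cmpl_handler, budget);
}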
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 00000000000..56677064508
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,690 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/if_ether.h>
25#include "vnic_resource.h"
26#include "vnic_devcmd.h"
27#include "vnic_dev.h"
28#include "vnic_stats.h"
29
30struct vnic_res {
31 void __iomem *vaddr;
32 unsigned int count;
33};
34
35struct vnic_dev {
36 void *priv;
37 struct pci_dev *pdev;
38 struct vnic_res res[RES_TYPE_MAX];
39 enum vnic_dev_intr_mode intr_mode;
40 struct vnic_devcmd __iomem *devcmd;
41 struct vnic_devcmd_notify *notify;
42 struct vnic_devcmd_notify notify_copy;
43 dma_addr_t notify_pa;
44 u32 *linkstatus;
45 dma_addr_t linkstatus_pa;
46 struct vnic_stats *stats;
47 dma_addr_t stats_pa;
48 struct vnic_devcmd_fw_info *fw_info;
49 dma_addr_t fw_info_pa;
50};
51
52#define VNIC_MAX_RES_HDR_SIZE \
53 (sizeof(struct vnic_resource_header) + \
54 sizeof(struct vnic_resource) * RES_TYPE_MAX)
55#define VNIC_RES_STRIDE 128
56
57void *vnic_dev_priv(struct vnic_dev *vdev)
58{
59 return vdev->priv;
60}
61
62static int vnic_dev_discover_res(struct vnic_dev *vdev,
63 struct vnic_dev_bar *bar)
64{
65 struct vnic_resource_header __iomem *rh;
66 struct vnic_resource __iomem *r;
67 u8 type;
68
69 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
70 printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
71 return -EINVAL;
72 }
73
74 rh = bar->vaddr;
75 if (!rh) {
76 printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
77 return -EINVAL;
78 }
79
80 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
81 ioread32(&rh->version) != VNIC_RES_VERSION) {
82 printk(KERN_ERR "vNIC BAR0 res magic/version error "
83 "exp (%lx/%lx) curr (%x/%x)\n",
84 VNIC_RES_MAGIC, VNIC_RES_VERSION,
85 ioread32(&rh->magic), ioread32(&rh->version));
86 return -EINVAL;
87 }
88
89 r = (struct vnic_resource __iomem *)(rh + 1);
90
91 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
92
93 u8 bar_num = ioread8(&r->bar);
94 u32 bar_offset = ioread32(&r->bar_offset);
95 u32 count = ioread32(&r->count);
96 u32 len;
97
98 r++;
99
100 if (bar_num != 0) /* only mapping in BAR0 resources */
101 continue;
102
103 switch (type) {
104 case RES_TYPE_WQ:
105 case RES_TYPE_RQ:
106 case RES_TYPE_CQ:
107 case RES_TYPE_INTR_CTRL:
108 /* each count is stride bytes long */
109 len = count * VNIC_RES_STRIDE;
110 if (len + bar_offset > bar->len) {
111 printk(KERN_ERR "vNIC BAR0 resource %d "
112 "out-of-bounds, offset 0x%x + "
113 "size 0x%x > bar len 0x%lx\n",
114 type, bar_offset,
115 len,
116 bar->len);
117 return -EINVAL;
118 }
119 break;
120 case RES_TYPE_INTR_PBA_LEGACY:
121 case RES_TYPE_DEVCMD:
122 len = count;
123 break;
124 default:
125 continue;
126 }
127
128 vdev->res[type].count = count;
129 vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
130 }
131
132 return 0;
133}
134
135unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
136 enum vnic_res_type type)
137{
138 return vdev->res[type].count;
139}
140
141void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
142 unsigned int index)
143{
144 if (!vdev->res[type].vaddr)
145 return NULL;
146
147 switch (type) {
148 case RES_TYPE_WQ:
149 case RES_TYPE_RQ:
150 case RES_TYPE_CQ:
151 case RES_TYPE_INTR_CTRL:
152 return (char __iomem *)vdev->res[type].vaddr +
153 index * VNIC_RES_STRIDE;
154 default:
155 return (char __iomem *)vdev->res[type].vaddr;
156 }
157}
158
159unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
160 unsigned int desc_count,
161 unsigned int desc_size)
162{
163 /* The base address of the desc rings must be 512 byte aligned.
164 * Descriptor count is aligned to groups of 32 descriptors. A
165 * count of 0 means the maximum 4096 descriptors. Descriptor
166 * size is aligned to 16 bytes.
167 */
168
169 unsigned int count_align = 32;
170 unsigned int desc_align = 16;
171
172 ring->base_align = 512;
173
174 if (desc_count == 0)
175 desc_count = 4096;
176
177 ring->desc_count = ALIGN(desc_count, count_align);
178
179 ring->desc_size = ALIGN(desc_size, desc_align);
180
181 ring->size = ring->desc_count * ring->desc_size;
182 ring->size_unaligned = ring->size + ring->base_align;
183
184 return ring->size_unaligned;
185}
186
187void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
188{
189 memset(ring->descs, 0, ring->size);
190}
191
192int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
193 unsigned int desc_count, unsigned int desc_size)
194{
195 vnic_dev_desc_ring_size(ring, desc_count, desc_size);
196
197 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
198 ring->size_unaligned,
199 &ring->base_addr_unaligned);
200
201 if (!ring->descs_unaligned) {
202 printk(KERN_ERR
203 "Failed to allocate ring (size=%d), aborting\n",
204 (int)ring->size);
205 return -ENOMEM;
206 }
207
208 ring->base_addr = ALIGN(ring->base_addr_unaligned,
209 ring->base_align);
210 ring->descs = (u8 *)ring->descs_unaligned +
211 (ring->base_addr - ring->base_addr_unaligned);
212
213 vnic_dev_clear_desc_ring(ring);
214
215 ring->desc_avail = ring->desc_count - 1;
216
217 return 0;
218}
219
220void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
221{
222 if (ring->descs) {
223 pci_free_consistent(vdev->pdev,
224 ring->size_unaligned,
225 ring->descs_unaligned,
226 ring->base_addr_unaligned);
227 ring->descs = NULL;
228 }
229}
230
231int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
232 u64 *a0, u64 *a1, int wait)
233{
234 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
235 int delay;
236 u32 status;
237 int dev_cmd_err[] = {
238 /* convert from fw's version of error.h to host's version */
239 0, /* ERR_SUCCESS */
240 EINVAL, /* ERR_EINVAL */
241 EFAULT, /* ERR_EFAULT */
242 EPERM, /* ERR_EPERM */
243 EBUSY, /* ERR_EBUSY */
244 };
245 int err;
246
247 status = ioread32(&devcmd->status);
248 if (status & STAT_BUSY) {
249 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
250 return -EBUSY;
251 }
252
253 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
254 writeq(*a0, &devcmd->args[0]);
255 writeq(*a1, &devcmd->args[1]);
256 wmb();
257 }
258
259 iowrite32(cmd, &devcmd->cmd);
260
261 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
262 return 0;
263
264 for (delay = 0; delay < wait; delay++) {
265
266 udelay(100);
267
268 status = ioread32(&devcmd->status);
269 if (!(status & STAT_BUSY)) {
270
271 if (status & STAT_ERROR) {
272 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
273 printk(KERN_ERR "Error %d devcmd %d\n",
274 err, _CMD_N(cmd));
275 return -err;
276 }
277
278 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
279 rmb();
280 *a0 = readq(&devcmd->args[0]);
281 *a1 = readq(&devcmd->args[1]);
282 }
283
284 return 0;
285 }
286 }
287
288 printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
289 return -ETIMEDOUT;
290}
291
292int vnic_dev_fw_info(struct vnic_dev *vdev,
293 struct vnic_devcmd_fw_info **fw_info)
294{
295 u64 a0, a1 = 0;
296 int wait = 1000;
297 int err = 0;
298
299 if (!vdev->fw_info) {
300 vdev->fw_info = pci_alloc_consistent(vdev->pdev,
301 sizeof(struct vnic_devcmd_fw_info),
302 &vdev->fw_info_pa);
303 if (!vdev->fw_info)
304 return -ENOMEM;
305
306 a0 = vdev->fw_info_pa;
307
308 /* only get fw_info once and cache it */
309 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
310 }
311
312 *fw_info = vdev->fw_info;
313
314 return err;
315}
316
317int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
318 void *value)
319{
320 u64 a0, a1;
321 int wait = 1000;
322 int err;
323
324 a0 = offset;
325 a1 = size;
326
327 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
328
329 switch (size) {
330 case 1:
331 *(u8 *)value = (u8)a0;
332 break;
333 case 2:
334 *(u16 *)value = (u16)a0;
335 break;
336 case 4:
337 *(u32 *)value = (u32)a0;
338 break;
339 case 8:
340 *(u64 *)value = a0;
341 break;
342 default:
343 BUG();
344 break;
345 }
346
347 return err;
348}
349
350int vnic_dev_stats_clear(struct vnic_dev *vdev)
351{
352 u64 a0 = 0, a1 = 0;
353 int wait = 1000;
354 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
355}
356
357int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
358{
359 u64 a0, a1;
360 int wait = 1000;
361
362 if (!vdev->stats) {
363 vdev->stats = pci_alloc_consistent(vdev->pdev,
364 sizeof(struct vnic_stats), &vdev->stats_pa);
365 if (!vdev->stats)
366 return -ENOMEM;
367 }
368
369 *stats = vdev->stats;
370 a0 = vdev->stats_pa;
371 a1 = sizeof(struct vnic_stats);
372
373 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
374}
375
376int vnic_dev_close(struct vnic_dev *vdev)
377{
378 u64 a0 = 0, a1 = 0;
379 int wait = 1000;
380 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
381}
382
383int vnic_dev_enable(struct vnic_dev *vdev)
384{
385 u64 a0 = 0, a1 = 0;
386 int wait = 1000;
387 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
388}
389
390int vnic_dev_disable(struct vnic_dev *vdev)
391{
392 u64 a0 = 0, a1 = 0;
393 int wait = 1000;
394 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
395}
396
397int vnic_dev_open(struct vnic_dev *vdev, int arg)
398{
399 u64 a0 = (u32)arg, a1 = 0;
400 int wait = 1000;
401 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
402}
403
404int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
405{
406 u64 a0 = 0, a1 = 0;
407 int wait = 1000;
408 int err;
409
410 *done = 0;
411
412 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
413 if (err)
414 return err;
415
416 *done = (a0 == 0);
417
418 return 0;
419}
420
421int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
422{
423 u64 a0 = (u32)arg, a1 = 0;
424 int wait = 1000;
425 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
426}
427
428int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
429{
430 u64 a0 = 0, a1 = 0;
431 int wait = 1000;
432 int err;
433
434 *done = 0;
435
436 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
437 if (err)
438 return err;
439
440 *done = (a0 == 0);
441
442 return 0;
443}
444
445int vnic_dev_hang_notify(struct vnic_dev *vdev)
446{
447 u64 a0, a1;
448 int wait = 1000;
449 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
450}
451
452int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
453{
454 u64 a0, a1;
455 int wait = 1000;
456 int err, i;
457
458 for (i = 0; i < ETH_ALEN; i++)
459 mac_addr[i] = 0;
460
461 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
462 if (err)
463 return err;
464
465 for (i = 0; i < ETH_ALEN; i++)
466 mac_addr[i] = ((u8 *)&a0)[i];
467
468 return 0;
469}
470
471void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
472 int broadcast, int promisc, int allmulti)
473{
474 u64 a0, a1 = 0;
475 int wait = 1000;
476 int err;
477
478 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
479 (multicast ? CMD_PFILTER_MULTICAST : 0) |
480 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
481 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
482 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
483
484 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
485 if (err)
486 printk(KERN_ERR "Can't set packet filter\n");
487}
488
489void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
490{
491 u64 a0 = 0, a1 = 0;
492 int wait = 1000;
493 int err;
494 int i;
495
496 for (i = 0; i < ETH_ALEN; i++)
497 ((u8 *)&a0)[i] = addr[i];
498
499 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
500 if (err)
501 printk(KERN_ERR
502 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
503 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
504 err);
505}
506
507void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
508{
509 u64 a0 = 0, a1 = 0;
510 int wait = 1000;
511 int err;
512 int i;
513
514 for (i = 0; i < ETH_ALEN; i++)
515 ((u8 *)&a0)[i] = addr[i];
516
517 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
518 if (err)
519 printk(KERN_ERR
520 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
521 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
522 err);
523}
524
525int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
526{
527 u64 a0, a1;
528 int wait = 1000;
529
530 if (!vdev->notify) {
531 vdev->notify = pci_alloc_consistent(vdev->pdev,
532 sizeof(struct vnic_devcmd_notify),
533 &vdev->notify_pa);
534 if (!vdev->notify)
535 return -ENOMEM;
536 }
537
538 a0 = vdev->notify_pa;
539 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
540 a1 += sizeof(struct vnic_devcmd_notify);
541
542 return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
543}
544
545void vnic_dev_notify_unset(struct vnic_dev *vdev)
546{
547 u64 a0, a1;
548 int wait = 1000;
549
550 a0 = 0; /* paddr = 0 to unset notify buffer */
551 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
552 a1 += sizeof(struct vnic_devcmd_notify);
553
554 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
555}
556
557static int vnic_dev_notify_ready(struct vnic_dev *vdev)
558{
559 u32 *words;
560 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
561 unsigned int i;
562 u32 csum;
563
564 if (!vdev->notify)
565 return 0;
566
567 do {
568 csum = 0;
569 memcpy(&vdev->notify_copy, vdev->notify,
570 sizeof(struct vnic_devcmd_notify));
571 words = (u32 *)&vdev->notify_copy;
572 for (i = 1; i < nwords; i++)
573 csum += words[i];
574 } while (csum != words[0]);
575
576 return 1;
577}
578
579int vnic_dev_init(struct vnic_dev *vdev, int arg)
580{
581 u64 a0 = (u32)arg, a1 = 0;
582 int wait = 1000;
583 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
584}
585
586int vnic_dev_link_status(struct vnic_dev *vdev)
587{
588 if (vdev->linkstatus)
589 return *vdev->linkstatus;
590
591 if (!vnic_dev_notify_ready(vdev))
592 return 0;
593
594 return vdev->notify_copy.link_state;
595}
596
597u32 vnic_dev_port_speed(struct vnic_dev *vdev)
598{
599 if (!vnic_dev_notify_ready(vdev))
600 return 0;
601
602 return vdev->notify_copy.port_speed;
603}
604
605u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
606{
607 if (!vnic_dev_notify_ready(vdev))
608 return 0;
609
610 return vdev->notify_copy.msglvl;
611}
612
613u32 vnic_dev_mtu(struct vnic_dev *vdev)
614{
615 if (!vnic_dev_notify_ready(vdev))
616 return 0;
617
618 return vdev->notify_copy.mtu;
619}
620
621u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
622{
623 if (!vnic_dev_notify_ready(vdev))
624 return 0;
625
626 return vdev->notify_copy.link_down_cnt;
627}
628
629void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
630 enum vnic_dev_intr_mode intr_mode)
631{
632 vdev->intr_mode = intr_mode;
633}
634
635enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
636 struct vnic_dev *vdev)
637{
638 return vdev->intr_mode;
639}
640
641void vnic_dev_unregister(struct vnic_dev *vdev)
642{
643 if (vdev) {
644 if (vdev->notify)
645 pci_free_consistent(vdev->pdev,
646 sizeof(struct vnic_devcmd_notify),
647 vdev->notify,
648 vdev->notify_pa);
649 if (vdev->linkstatus)
650 pci_free_consistent(vdev->pdev,
651 sizeof(u32),
652 vdev->linkstatus,
653 vdev->linkstatus_pa);
654 if (vdev->stats)
655 pci_free_consistent(vdev->pdev,
656 sizeof(struct vnic_stats),
657 vdev->stats, vdev->stats_pa);
658 if (vdev->fw_info)
659 pci_free_consistent(vdev->pdev,
660 sizeof(struct vnic_devcmd_fw_info),
661 vdev->fw_info, vdev->fw_info_pa);
662 kfree(vdev);
663 }
664}
665
666struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
667 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
668{
669 if (!vdev) {
670 vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
671 if (!vdev)
672 return NULL;
673 }
674
675 vdev->priv = priv;
676 vdev->pdev = pdev;
677
678 if (vnic_dev_discover_res(vdev, bar))
679 goto err_out;
680
681 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
682 if (!vdev->devcmd)
683 goto err_out;
684
685 return vdev;
686
687err_out:
688 vnic_dev_unregister(vdev);
689 return NULL;
690}
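
Taken together, a PCI driver maps BAR0, registers the vnic_dev, and then talks to firmware through the vnic_dev_cmd()-based wrappers above. A compressed sketch of that flow (error handling trimmed; function name and the caller-supplied BAR mapping are illustrative assumptions):

/* Illustrative probe-time flow using the interfaces above (abridged). */
static int example_vnic_probe(struct pci_dev *pdev, void __iomem *bar0_va,
			      unsigned long bar0_len)
{
	struct vnic_dev_bar bar = {
		.vaddr = bar0_va,
		.len = bar0_len,
	};
	struct vnic_devcmd_fw_info *fw_info;
	struct vnic_dev *vdev;
	int err;

	vdev = vnic_dev_register(NULL, NULL /* priv */, pdev, &bar);
	if (!vdev)
		return -ENODEV;

	err = vnic_dev_fw_info(vdev, &fw_info);	/* one devcmd round trip */
	if (!err)
		printk(KERN_INFO "fw version %s\n", fw_info->fw_version);

	err = vnic_dev_open(vdev, 0);
	/* ... poll vnic_dev_open_done(), then set up WQs/RQs/CQs ... */

	vnic_dev_unregister(vdev);
	return err;
}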
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 00000000000..f9935a8a5a0
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEV_H_
19#define _VNIC_DEV_H_
20
21#include "vnic_resource.h"
22#include "vnic_devcmd.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_dev_priv fnic_dev_priv
29#define vnic_dev_get_res_count fnic_dev_get_res_count
30#define vnic_dev_get_res fnic_dev_get_res
31#define vnic_dev_desc_ring_size fnic_dev_desc_ring_size
32#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
33#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
34#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
35#define vnic_dev_cmd fnic_dev_cmd
36#define vnic_dev_fw_info fnic_dev_fw_info
37#define vnic_dev_spec fnic_dev_spec
38#define vnic_dev_stats_clear fnic_dev_stats_clear
39#define vnic_dev_stats_dump fnic_dev_stats_dump
40#define vnic_dev_hang_notify fnic_dev_hang_notify
41#define vnic_dev_packet_filter fnic_dev_packet_filter
42#define vnic_dev_add_addr fnic_dev_add_addr
43#define vnic_dev_del_addr fnic_dev_del_addr
44#define vnic_dev_mac_addr fnic_dev_mac_addr
45#define vnic_dev_notify_set fnic_dev_notify_set
46#define vnic_dev_notify_unset fnic_dev_notify_unset
47#define vnic_dev_link_status fnic_dev_link_status
48#define vnic_dev_port_speed fnic_dev_port_speed
49#define vnic_dev_msg_lvl fnic_dev_msg_lvl
50#define vnic_dev_mtu fnic_dev_mtu
51#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
52#define vnic_dev_close fnic_dev_close
53#define vnic_dev_enable fnic_dev_enable
54#define vnic_dev_disable fnic_dev_disable
55#define vnic_dev_open fnic_dev_open
56#define vnic_dev_open_done fnic_dev_open_done
57#define vnic_dev_init fnic_dev_init
58#define vnic_dev_soft_reset fnic_dev_soft_reset
59#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
60#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
61#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
62#define vnic_dev_unregister fnic_dev_unregister
63#define vnic_dev_register fnic_dev_register
64
65#ifndef VNIC_PADDR_TARGET
66#define VNIC_PADDR_TARGET 0x0000000000000000ULL
67#endif
68
69#ifndef readq
70static inline u64 readq(void __iomem *reg)
71{
72 return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
73}
74
75static inline void writeq(u64 val, void __iomem *reg)
76{
77 writel(val & 0xffffffff, reg);
78 writel(val >> 32, reg + 0x4UL);
79}
80#endif
81
82enum vnic_dev_intr_mode {
83 VNIC_DEV_INTR_MODE_UNKNOWN,
84 VNIC_DEV_INTR_MODE_INTX,
85 VNIC_DEV_INTR_MODE_MSI,
86 VNIC_DEV_INTR_MODE_MSIX,
87};
88
89struct vnic_dev_bar {
90 void __iomem *vaddr;
91 dma_addr_t bus_addr;
92 unsigned long len;
93};
94
95struct vnic_dev_ring {
96 void *descs;
97 size_t size;
98 dma_addr_t base_addr;
99 size_t base_align;
100 void *descs_unaligned;
101 size_t size_unaligned;
102 dma_addr_t base_addr_unaligned;
103 unsigned int desc_size;
104 unsigned int desc_count;
105 unsigned int desc_avail;
106};
107
108struct vnic_dev;
109struct vnic_stats;
110
111void *vnic_dev_priv(struct vnic_dev *vdev);
112unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
113 enum vnic_res_type type);
114void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
115 unsigned int index);
116unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
117 unsigned int desc_count,
118 unsigned int desc_size);
119void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
120int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
121 unsigned int desc_count, unsigned int desc_size);
122void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
123 struct vnic_dev_ring *ring);
124int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
125 u64 *a0, u64 *a1, int wait);
126int vnic_dev_fw_info(struct vnic_dev *vdev,
127 struct vnic_devcmd_fw_info **fw_info);
128int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
129 unsigned int size, void *value);
130int vnic_dev_stats_clear(struct vnic_dev *vdev);
131int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
132int vnic_dev_hang_notify(struct vnic_dev *vdev);
133void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
134 int broadcast, int promisc, int allmulti);
135void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
136void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
137int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
138int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
139void vnic_dev_notify_unset(struct vnic_dev *vdev);
140int vnic_dev_link_status(struct vnic_dev *vdev);
141u32 vnic_dev_port_speed(struct vnic_dev *vdev);
142u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
143u32 vnic_dev_mtu(struct vnic_dev *vdev);
144u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
145int vnic_dev_close(struct vnic_dev *vdev);
146int vnic_dev_enable(struct vnic_dev *vdev);
147int vnic_dev_disable(struct vnic_dev *vdev);
148int vnic_dev_open(struct vnic_dev *vdev, int arg);
149int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
150int vnic_dev_init(struct vnic_dev *vdev, int arg);
151int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
152int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
153void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
154 enum vnic_dev_intr_mode intr_mode);
155enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
156void vnic_dev_unregister(struct vnic_dev *vdev);
157struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
158 void *priv, struct pci_dev *pdev,
159 struct vnic_dev_bar *bar);
160
161#endif /* _VNIC_DEV_H_ */
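
The descriptor-ring sizing rules implemented by vnic_dev_desc_ring_size() in vnic_dev.c (512-byte base alignment, counts rounded up to 32, descriptor sizes to 16, a count of 0 meaning 4096) work out as in the short worked case below; the input numbers are just an example.

/* Worked example of vnic_dev_desc_ring_size() (illustrative numbers):
 *   desc_count = 50, desc_size = 24
 *   ring->desc_count     = ALIGN(50, 32) = 64
 *   ring->desc_size      = ALIGN(24, 16) = 32
 *   ring->size           = 64 * 32       = 2048 bytes
 *   ring->size_unaligned = 2048 + 512    = 2560 bytes allocated,
 *   with the usable base rounded up to the next 512-byte boundary.
 */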
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 00000000000..d62b9061bf1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,281 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEVCMD_H_
19#define _VNIC_DEVCMD_H_
20
21#define _CMD_NBITS 14
22#define _CMD_VTYPEBITS 10
23#define _CMD_FLAGSBITS 6
24#define _CMD_DIRBITS 2
25
26#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
27#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
28#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
29#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
30
31#define _CMD_NSHIFT 0
32#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
33#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
34#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
35
36/*
37 * Direction bits (from host perspective).
38 */
39#define _CMD_DIR_NONE 0U
40#define _CMD_DIR_WRITE 1U
41#define _CMD_DIR_READ 2U
42#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
43
44/*
45 * Flag bits.
46 */
47#define _CMD_FLAGS_NONE 0U
48#define _CMD_FLAGS_NOWAIT 1U
49
50/*
51 * vNIC type bits.
52 */
53#define _CMD_VTYPE_NONE 0U
54#define _CMD_VTYPE_ENET 1U
55#define _CMD_VTYPE_FC 2U
56#define _CMD_VTYPE_SCSI 4U
57#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
58
59/*
60 * Used to create cmds..
61*/
62#define _CMDCF(dir, flags, vtype, nr) \
63 (((dir) << _CMD_DIRSHIFT) | \
64 ((flags) << _CMD_FLAGSSHIFT) | \
65 ((vtype) << _CMD_VTYPESHIFT) | \
66 ((nr) << _CMD_NSHIFT))
67#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
68#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
69
70/*
71 * Used to decode cmds..
72*/
73#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
74#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
75#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
76#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
77
78enum vnic_devcmd_cmd {
79 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
80
81 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
82 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
83
84 /* dev-specific block member:
85 * in: (u16)a0=offset,(u8)a1=size
86 * out: a0=value */
87 CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
88
89 /* stats clear */
90 CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
91
92 /* stats dump in mem: (u64)a0=paddr to stats area,
93 * (u16)a1=sizeof stats area */
94 CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
95
96 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
97 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
98
99 /* hang detection notification */
100 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
101
102 /* MAC address in (u48)a0 */
103 CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
104 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
105
106 /* disable/enable promisc mode: (u8)a0=0/1 */
107/***** XXX DEPRECATED *****/
108 CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
109
110 /* disable/enable all-multi mode: (u8)a0=0/1 */
111/***** XXX DEPRECATED *****/
112 CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
113
114 /* add addr from (u48)a0 */
115 CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
116 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
117
118 /* del addr from (u48)a0 */
119 CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
120 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
121
122 /* add VLAN id in (u16)a0 */
123 CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
124
125 /* del VLAN id in (u16)a0 */
126 CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
127
128 /* nic_cfg in (u32)a0 */
129 CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
130
131 /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
132 CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
133
134 /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
135 CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
136
137 /* initiate softreset */
138 CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
139
140 /* softreset status:
141 * out: a0=0 reset complete, a0=1 reset in progress */
142 CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
143
144 /* set struct vnic_devcmd_notify buffer in mem:
145 * in:
146 * (u64)a0=paddr to notify (set paddr=0 to unset)
147 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
148 * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
149 * out:
150 * (u32)a1 = effective size
151 */
152 CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
153
154 /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
155 * (u8)a1=PXENV_UNDI_xxx */
156 CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
157
158 /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
159 CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
160
161 /* open status:
162 * out: a0=0 open complete, a0=1 open in progress */
163 CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
164
165 /* close vnic */
166 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
167
168 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
169 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
170
171 /* variant of CMD_INIT, with provisioning info
172 * (u64)a0=paddr of vnic_devcmd_provinfo
173 * (u32)a1=sizeof provision info */
174 CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
175
176 /* enable virtual link */
177 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
178
179 /* disable virtual link */
180 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
181
182 /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
183 CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
184
185 /* init status:
186 * out: a0=0 init complete, a0=1 init in progress
187 * if a0=0, a1=errno */
188 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
189
190 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
191 * (u8)a1=INT13_CMD_xxx */
192 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
193
194 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
195 CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
196
197 /* undo initialize of virtual link */
198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
199};
200
201/* flags for CMD_OPEN */
202#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
203
204/* flags for CMD_INIT */
205#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
206
207/* flags for CMD_PACKET_FILTER */
208#define CMD_PFILTER_DIRECTED 0x01
209#define CMD_PFILTER_MULTICAST 0x02
210#define CMD_PFILTER_BROADCAST 0x04
211#define CMD_PFILTER_PROMISCUOUS 0x08
212#define CMD_PFILTER_ALL_MULTICAST 0x10
213
214enum vnic_devcmd_status {
215 STAT_NONE = 0,
216 STAT_BUSY = 1 << 0, /* cmd in progress */
217 STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
218};
219
220enum vnic_devcmd_error {
221 ERR_SUCCESS = 0,
222 ERR_EINVAL = 1,
223 ERR_EFAULT = 2,
224 ERR_EPERM = 3,
225 ERR_EBUSY = 4,
226 ERR_ECMDUNKNOWN = 5,
227 ERR_EBADSTATE = 6,
228 ERR_ENOMEM = 7,
229 ERR_ETIMEDOUT = 8,
230 ERR_ELINKDOWN = 9,
231};
232
233struct vnic_devcmd_fw_info {
234 char fw_version[32];
235 char fw_build[32];
236 char hw_version[32];
237 char hw_serial_number[32];
238};
239
240struct vnic_devcmd_notify {
241 u32 csum; /* checksum over following words */
242
243 u32 link_state; /* link up == 1 */
244 u32 port_speed; /* effective port speed (rate limit) */
245 u32 mtu; /* MTU */
246 u32 msglvl; /* requested driver msg lvl */
247 u32 uif; /* uplink interface */
248 u32 status; /* status bits (see VNIC_STF_*) */
249 u32 error; /* error code (see ERR_*) for first ERR */
250 u32 link_down_cnt; /* running count of link down transitions */
251};
252#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
253
254struct vnic_devcmd_provinfo {
255 u8 oui[3];
256 u8 type;
257 u8 data[0];
258};
259
260/*
261 * Writing cmd register causes STAT_BUSY to get set in status register.
262 * When cmd completes, STAT_BUSY will be cleared.
263 *
264 * If cmd completed successfully STAT_ERROR will be clear
265 * and args registers contain cmd-specific results.
266 *
267 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
268 *
269 * status register is read-only. While STAT_BUSY is set,
270 * all other register contents are read-only.
271 */
272
273/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
274#define VNIC_DEVCMD_NARGS 15
275struct vnic_devcmd {
276 u32 status; /* RO */
277 u32 cmd; /* RW */
278 u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
279};
280
281#endif /* _VNIC_DEVCMD_H_ */
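
The _CMDC*/_CMD_* macros pack direction, flags, vNIC type and command number into one 32-bit opcode, and the decode side in vnic_dev_cmd() relies on the same layout. A tiny round-trip check, illustrative only:

/* Illustrative round trip through the opcode packing macros above. */
static void example_devcmd_layout(void)
{
	u32 cmd = CMD_MCPU_FW_INFO; /* _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1) */

	BUG_ON(_CMD_DIR(cmd)   != _CMD_DIR_WRITE);
	BUG_ON(_CMD_VTYPE(cmd) != _CMD_VTYPE_ALL);
	BUG_ON(_CMD_N(cmd)     != 1);
	BUG_ON(_CMD_FLAGS(cmd) != _CMD_FLAGS_NONE);
}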
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 00000000000..4f4dc8793d2
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26
27void vnic_intr_free(struct vnic_intr *intr)
28{
29 intr->ctrl = NULL;
30}
31
32int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
33 unsigned int index)
34{
35 intr->index = index;
36 intr->vdev = vdev;
37
38 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
39 if (!intr->ctrl) {
40 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
41 index);
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
49 unsigned int coalescing_type, unsigned int mask_on_assertion)
50{
51 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
52 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
53 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
54 iowrite32(0, &intr->ctrl->int_credits);
55}
56
57void vnic_intr_clean(struct vnic_intr *intr)
58{
59 iowrite32(0, &intr->ctrl->int_credits);
60}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 00000000000..d5fb40e7c98
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_INTR_H_
19#define _VNIC_INTR_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_intr_unmask fnic_intr_unmask
29#define vnic_intr_mask fnic_intr_mask
30#define vnic_intr_return_credits fnic_intr_return_credits
31#define vnic_intr_credits fnic_intr_credits
32#define vnic_intr_return_all_credits fnic_intr_return_all_credits
33#define vnic_intr_legacy_pba fnic_intr_legacy_pba
34#define vnic_intr_free fnic_intr_free
35#define vnic_intr_alloc fnic_intr_alloc
36#define vnic_intr_init fnic_intr_init
37#define vnic_intr_clean fnic_intr_clean
38
39#define VNIC_INTR_TIMER_MAX 0xffff
40
41#define VNIC_INTR_TIMER_TYPE_ABS 0
42#define VNIC_INTR_TIMER_TYPE_QUIET 1
43
44/* Interrupt control */
45struct vnic_intr_ctrl {
46 u32 coalescing_timer; /* 0x00 */
47 u32 pad0;
48 u32 coalescing_value; /* 0x08 */
49 u32 pad1;
50 u32 coalescing_type; /* 0x10 */
51 u32 pad2;
52 u32 mask_on_assertion; /* 0x18 */
53 u32 pad3;
54 u32 mask; /* 0x20 */
55 u32 pad4;
56 u32 int_credits; /* 0x28 */
57 u32 pad5;
58 u32 int_credit_return; /* 0x30 */
59 u32 pad6;
60};
61
62struct vnic_intr {
63 unsigned int index;
64 struct vnic_dev *vdev;
65 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
66};
67
68static inline void vnic_intr_unmask(struct vnic_intr *intr)
69{
70 iowrite32(0, &intr->ctrl->mask);
71}
72
73static inline void vnic_intr_mask(struct vnic_intr *intr)
74{
75 iowrite32(1, &intr->ctrl->mask);
76}
77
78static inline void vnic_intr_return_credits(struct vnic_intr *intr,
79 unsigned int credits, int unmask, int reset_timer)
80{
81#define VNIC_INTR_UNMASK_SHIFT 16
82#define VNIC_INTR_RESET_TIMER_SHIFT 17
83
84 u32 int_credit_return = (credits & 0xffff) |
85 (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
86 (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
87
88 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
89}
90
91static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
92{
93 return ioread32(&intr->ctrl->int_credits);
94}
95
96static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
97{
98 unsigned int credits = vnic_intr_credits(intr);
99 int unmask = 1;
100 int reset_timer = 1;
101
102 vnic_intr_return_credits(intr, credits, unmask, reset_timer);
103}
104
105static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
106{
107 /* read PBA without clearing */
108 return ioread32(legacy_pba);
109}
110
111void vnic_intr_free(struct vnic_intr *intr);
112int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
113 unsigned int index);
114void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
115 unsigned int coalescing_type, unsigned int mask_on_assertion);
116void vnic_intr_clean(struct vnic_intr *intr);
117
118#endif /* _VNIC_INTR_H_ */
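
A note on the credit-return path above: vnic_intr_return_credits() folds three things into one register write -- the serviced-credit count in bits 0-15, the unmask request in bit 16 and the coalescing-timer reset in bit 17. A minimal stand-alone sketch of that encoding (hypothetical user-space C, not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define UNMASK_SHIFT      16   /* mirrors VNIC_INTR_UNMASK_SHIFT */
#define RESET_TIMER_SHIFT 17   /* mirrors VNIC_INTR_RESET_TIMER_SHIFT */

/* Build the value vnic_intr_return_credits() would hand to iowrite32() */
static uint32_t pack_credit_return(unsigned int credits, int unmask, int reset_timer)
{
    return (credits & 0xffff) |
           (unmask ? (1u << UNMASK_SHIFT) : 0) |
           (reset_timer ? (1u << RESET_TIMER_SHIFT) : 0);
}

int main(void)
{
    /* e.g. return 3 serviced credits, re-enable the interrupt, restart coalescing */
    printf("0x%08x\n", pack_credit_return(3, 1, 1));   /* prints 0x00030003 */
    return 0;
}

vnic_intr_return_all_credits() is then just this call with the credit count read back from the int_credits register and both flag bits set.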
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 00000000000..f15b83eeace
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_NIC_H_
19#define _VNIC_NIC_H_
20
21/*
22 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
23 * Driver) when both are built with CONFIG options =y
24 */
25#define vnic_set_nic_cfg fnic_set_nic_cfg
26
27#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
28#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
29#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
30#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
31#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
32#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
33#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
34#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
35#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
36#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
37#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
38#define NIC_CFG_RSS_ENABLE (1UL << 22)
39#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
40#define NIC_CFG_RSS_ENABLE_SHIFT 22
41#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
42#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
43#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
44#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
45#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
46#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
47
48static inline void vnic_set_nic_cfg(u32 *nic_cfg,
49 u8 rss_default_cpu, u8 rss_hash_type,
50 u8 rss_hash_bits, u8 rss_base_cpu,
51 u8 rss_enable, u8 tso_ipid_split_en,
52 u8 ig_vlan_strip_en)
53{
54 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
55 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
56 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
57 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
58 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
59 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
60 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
61 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
62 << NIC_CFG_RSS_ENABLE_SHIFT) |
63 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
64 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
65 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
66 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
67}
68
69#endif /* _VNIC_NIC_H_ */
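
For reference, vnic_set_nic_cfg() above is pure bit packing: each argument is masked to its field width and shifted to its field offset so all seven RSS/VLAN settings land in one u32 handed to the device. A simplified stand-alone mirror of two of those fields, with illustrative values only (not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Same layout as the NIC_CFG_RSS_HASH_TYPE_* and NIC_CFG_RSS_ENABLE_* fields above */
#define RSS_HASH_TYPE_MASK  0xffu
#define RSS_HASH_TYPE_SHIFT 8
#define RSS_ENABLE_SHIFT    22

int main(void)
{
    uint32_t nic_cfg = 0;
    uint8_t hash_type = 0x0f;                           /* illustrative value */

    nic_cfg |= (uint32_t)(hash_type & RSS_HASH_TYPE_MASK) << RSS_HASH_TYPE_SHIFT;
    nic_cfg |= 1u << RSS_ENABLE_SHIFT;                  /* rss_enable = 1 */

    /* Decoding is the reverse: shift down, then mask to the field width */
    printf("hash_type=%u rss_enable=%u\n",
           (nic_cfg >> RSS_HASH_TYPE_SHIFT) & RSS_HASH_TYPE_MASK,
           (nic_cfg >> RSS_ENABLE_SHIFT) & 1u);
    return 0;
}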
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 00000000000..2d842f79d41
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RESOURCE_H_
19#define _VNIC_RESOURCE_H_
20
21#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
22#define VNIC_RES_VERSION 0x00000000L
23
24/* vNIC resource types */
25enum vnic_res_type {
26 RES_TYPE_EOL, /* End-of-list */
27 RES_TYPE_WQ, /* Work queues */
28 RES_TYPE_RQ, /* Receive queues */
29 RES_TYPE_CQ, /* Completion queues */
30 RES_TYPE_RSVD1,
31 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
32 RES_TYPE_RSVD2,
33 RES_TYPE_RSVD3,
34 RES_TYPE_RSVD4,
35 RES_TYPE_RSVD5,
36 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
37 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
38 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
39 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
40 RES_TYPE_RSVD6,
41 RES_TYPE_RSVD7,
42 RES_TYPE_DEVCMD, /* Device command region */
43 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
44
45 RES_TYPE_MAX, /* Count of resource types */
46};
47
48struct vnic_resource_header {
49 u32 magic;
50 u32 version;
51};
52
53struct vnic_resource {
54 u8 type;
55 u8 bar;
56 u8 pad[2];
57 u32 bar_offset;
58 u32 count;
59};
60
61#endif /* _VNIC_RESOURCE_H_ */
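
The two structures above describe the resource directory the adapter exposes in BAR space: a magic/version header followed by an array of vnic_resource entries terminated by a RES_TYPE_EOL entry. The real walk is done over ioremapped registers in vnic_dev.c; a simplified in-memory sketch of the same layout and loop, with made-up table contents, would be:

#include <stdint.h>
#include <stdio.h>

enum { RES_EOL = 0, RES_WQ = 1, RES_RQ = 2, RES_CQ = 3 };   /* subset of vnic_res_type */

struct res_entry {          /* same shape as struct vnic_resource */
    uint8_t  type;
    uint8_t  bar;
    uint8_t  pad[2];
    uint32_t bar_offset;
    uint32_t count;
};

int main(void)
{
    /* Hypothetical directory as a device might present it */
    struct res_entry table[] = {
        { RES_WQ,  0, {0, 0}, 0x1000, 2 },
        { RES_RQ,  0, {0, 0}, 0x2000, 2 },
        { RES_CQ,  0, {0, 0}, 0x3000, 4 },
        { RES_EOL, 0, {0, 0}, 0,      0 },
    };

    for (struct res_entry *r = table; r->type != RES_EOL; r++)
        printf("type %u: %u instance(s) at BAR%u+0x%x\n",
               (unsigned)r->type, (unsigned)r->count,
               (unsigned)r->bar, (unsigned)r->bar_offset);
    return 0;
}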
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 00000000000..bedd0d28563
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_rq.h"
25
26static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
27{
28 struct vnic_rq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = rq->ring.desc_count;
31 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
32
33 vdev = rq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!rq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc rq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = rq->bufs[i];
45 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)rq->ring.descs +
48 rq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = rq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
53 buf->next = rq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 rq->to_use = rq->to_clean = rq->bufs[0];
62 rq->buf_index = 0;
63
64 return 0;
65}
66
67void vnic_rq_free(struct vnic_rq *rq)
68{
69 struct vnic_dev *vdev;
70 unsigned int i;
71
72 vdev = rq->vdev;
73
74 vnic_dev_free_desc_ring(vdev, &rq->ring);
75
76 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
77 kfree(rq->bufs[i]);
78 rq->bufs[i] = NULL;
79 }
80
81 rq->ctrl = NULL;
82}
83
84int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 rq->index = index;
90 rq->vdev = vdev;
91
92 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
93 if (!rq->ctrl) {
94 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_rq_disable(rq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_rq_alloc_bufs(rq);
105 if (err) {
106 vnic_rq_free(rq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118 u32 fetch_index;
119
120 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
121 writeq(paddr, &rq->ctrl->ring_base);
122 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
123 iowrite32(cq_index, &rq->ctrl->cq_index);
124 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
125 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
126 iowrite32(0, &rq->ctrl->dropped_packet_count);
127 iowrite32(0, &rq->ctrl->error_status);
128
129 /* Use current fetch_index as the ring starting point */
130 fetch_index = ioread32(&rq->ctrl->fetch_index);
131 rq->to_use = rq->to_clean =
132 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
133 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
134 iowrite32(fetch_index, &rq->ctrl->posted_index);
135
136 rq->buf_index = 0;
137}
138
139unsigned int vnic_rq_error_status(struct vnic_rq *rq)
140{
141 return ioread32(&rq->ctrl->error_status);
142}
143
144void vnic_rq_enable(struct vnic_rq *rq)
145{
146 iowrite32(1, &rq->ctrl->enable);
147}
148
149int vnic_rq_disable(struct vnic_rq *rq)
150{
151 unsigned int wait;
152
153 iowrite32(0, &rq->ctrl->enable);
154
155 /* Wait for HW to ACK disable request */
156 for (wait = 0; wait < 100; wait++) {
157 if (!(ioread32(&rq->ctrl->running)))
158 return 0;
159 udelay(1);
160 }
161
162 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
163
164 return -ETIMEDOUT;
165}
166
167void vnic_rq_clean(struct vnic_rq *rq,
168 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
169{
170 struct vnic_rq_buf *buf;
171 u32 fetch_index;
172
173 BUG_ON(ioread32(&rq->ctrl->enable));
174
175 buf = rq->to_clean;
176
177 while (vnic_rq_desc_used(rq) > 0) {
178
179 (*buf_clean)(rq, buf);
180
181 buf = rq->to_clean = buf->next;
182 rq->ring.desc_avail++;
183 }
184
185 /* Use current fetch_index as the ring starting point */
186 fetch_index = ioread32(&rq->ctrl->fetch_index);
187 rq->to_use = rq->to_clean =
188 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
189 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
190 iowrite32(fetch_index, &rq->ctrl->posted_index);
191
192 rq->buf_index = 0;
193
194 vnic_dev_clear_desc_ring(&rq->ring);
195}
196
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 00000000000..aebdfbd6ad3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RQ_H_
19#define _VNIC_RQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
27 * Driver) when both are built with CONFIG options =y
28 */
29#define vnic_rq_desc_avail fnic_rq_desc_avail
30#define vnic_rq_desc_used fnic_rq_desc_used
31#define vnic_rq_next_desc fnic_rq_next_desc
32#define vnic_rq_next_index fnic_rq_next_index
33#define vnic_rq_next_buf_index fnic_rq_next_buf_index
34#define vnic_rq_post fnic_rq_post
35#define vnic_rq_posting_soon fnic_rq_posting_soon
36#define vnic_rq_return_descs fnic_rq_return_descs
37#define vnic_rq_service fnic_rq_service
38#define vnic_rq_fill fnic_rq_fill
39#define vnic_rq_free fnic_rq_free
40#define vnic_rq_alloc fnic_rq_alloc
41#define vnic_rq_init fnic_rq_init
42#define vnic_rq_error_status fnic_rq_error_status
43#define vnic_rq_enable fnic_rq_enable
44#define vnic_rq_disable fnic_rq_disable
45#define vnic_rq_clean fnic_rq_clean
46
47/* Receive queue control */
48struct vnic_rq_ctrl {
49 u64 ring_base; /* 0x00 */
50 u32 ring_size; /* 0x08 */
51 u32 pad0;
52 u32 posted_index; /* 0x10 */
53 u32 pad1;
54 u32 cq_index; /* 0x18 */
55 u32 pad2;
56 u32 enable; /* 0x20 */
57 u32 pad3;
58 u32 running; /* 0x28 */
59 u32 pad4;
60 u32 fetch_index; /* 0x30 */
61 u32 pad5;
62 u32 error_interrupt_enable; /* 0x38 */
63 u32 pad6;
64 u32 error_interrupt_offset; /* 0x40 */
65 u32 pad7;
66 u32 error_status; /* 0x48 */
67 u32 pad8;
68 u32 dropped_packet_count; /* 0x50 */
69 u32 pad9;
70 u32 dropped_packet_count_rc; /* 0x58 */
71 u32 pad10;
72};
73
74/* Break the vnic_rq_buf allocations into blocks of 64 entries */
75#define VNIC_RQ_BUF_BLK_ENTRIES 64
76#define VNIC_RQ_BUF_BLK_SZ \
77 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
78#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
79 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
80#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
81
82struct vnic_rq_buf {
83 struct vnic_rq_buf *next;
84 dma_addr_t dma_addr;
85 void *os_buf;
86 unsigned int os_buf_index;
87 unsigned int len;
88 unsigned int index;
89 void *desc;
90};
91
92struct vnic_rq {
93 unsigned int index;
94 struct vnic_dev *vdev;
95 struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
96 struct vnic_dev_ring ring;
97 struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
98 struct vnic_rq_buf *to_use;
99 struct vnic_rq_buf *to_clean;
100 void *os_buf_head;
101 unsigned int buf_index;
102 unsigned int pkts_outstanding;
103};
104
105static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
106{
107 /* how many does SW own? */
108 return rq->ring.desc_avail;
109}
110
111static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
112{
113 /* how many does HW own? */
114 return rq->ring.desc_count - rq->ring.desc_avail - 1;
115}
116
117static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
118{
119 return rq->to_use->desc;
120}
121
122static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
123{
124 return rq->to_use->index;
125}
126
127static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
128{
129 return rq->buf_index++;
130}
131
132static inline void vnic_rq_post(struct vnic_rq *rq,
133 void *os_buf, unsigned int os_buf_index,
134 dma_addr_t dma_addr, unsigned int len)
135{
136 struct vnic_rq_buf *buf = rq->to_use;
137
138 buf->os_buf = os_buf;
139 buf->os_buf_index = os_buf_index;
140 buf->dma_addr = dma_addr;
141 buf->len = len;
142
143 buf = buf->next;
144 rq->to_use = buf;
145 rq->ring.desc_avail--;
146
147 /* Move the posted_index every nth descriptor
148 */
149
150#ifndef VNIC_RQ_RETURN_RATE
151#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
152#endif
153
154 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
155 /* Adding write memory barrier prevents compiler and/or CPU
156 * reordering, thus avoiding descriptor posting before
157 * descriptor is initialized. Otherwise, hardware can read
158 * stale descriptor fields.
159 */
160 wmb();
161 iowrite32(buf->index, &rq->ctrl->posted_index);
162 }
163}
164
165static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
166{
167 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
168}
169
170static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
171{
172 rq->ring.desc_avail += count;
173}
174
175enum desc_return_options {
176 VNIC_RQ_RETURN_DESC,
177 VNIC_RQ_DEFER_RETURN_DESC,
178};
179
180static inline void vnic_rq_service(struct vnic_rq *rq,
181 struct cq_desc *cq_desc, u16 completed_index,
182 int desc_return, void (*buf_service)(struct vnic_rq *rq,
183 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
184 int skipped, void *opaque), void *opaque)
185{
186 struct vnic_rq_buf *buf;
187 int skipped;
188
189 buf = rq->to_clean;
190 while (1) {
191
192 skipped = (buf->index != completed_index);
193
194 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
195
196 if (desc_return == VNIC_RQ_RETURN_DESC)
197 rq->ring.desc_avail++;
198
199 rq->to_clean = buf->next;
200
201 if (!skipped)
202 break;
203
204 buf = rq->to_clean;
205 }
206}
207
208static inline int vnic_rq_fill(struct vnic_rq *rq,
209 int (*buf_fill)(struct vnic_rq *rq))
210{
211 int err;
212
213 while (vnic_rq_desc_avail(rq) > 1) {
214
215 err = (*buf_fill)(rq);
216 if (err)
217 return err;
218 }
219
220 return 0;
221}
222
223void vnic_rq_free(struct vnic_rq *rq);
224int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
225 unsigned int desc_count, unsigned int desc_size);
226void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
227 unsigned int error_interrupt_enable,
228 unsigned int error_interrupt_offset);
229unsigned int vnic_rq_error_status(struct vnic_rq *rq);
230void vnic_rq_enable(struct vnic_rq *rq);
231int vnic_rq_disable(struct vnic_rq *rq);
232void vnic_rq_clean(struct vnic_rq *rq,
233 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
234
235#endif /* _VNIC_RQ_H_ */
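
Two details of the receive queue above are easy to miss: vnic_rq_desc_used() keeps one descriptor permanently unposted so a completely full ring can never make posted_index equal fetch_index, and vnic_rq_post() only writes posted_index out to hardware every VNIC_RQ_RETURN_RATE + 1 buffers to batch the MMIO writes. A rough stand-alone model of that batching, assuming a 64-entry ring (not part of this patch):

#include <stdio.h>

#define RETURN_RATE 0xf   /* mirrors VNIC_RQ_RETURN_RATE: notify HW on every 16th index */

int main(void)
{
    unsigned int desc_count = 64, index = 0, mmio_writes = 0;

    /* Post desc_count - 1 buffers the way vnic_rq_fill()/vnic_rq_post() would:
     * advance to the next buffer index, and only touch posted_index when that
     * index lands on a return-rate boundary. */
    for (unsigned int posted = 0; posted < desc_count - 1; posted++) {
        index = (index + 1) % desc_count;      /* rq->to_use = buf->next */
        if ((index & RETURN_RATE) == 0)
            mmio_writes++;                     /* iowrite32(..., &rq->ctrl->posted_index) */
    }
    printf("%u buffers posted, %u posted_index writes\n",
           desc_count - 1, mmio_writes);       /* 63 buffers, 3 writes */
    return 0;
}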
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 00000000000..46baa525400
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_SCSI_H_
19#define _VNIC_SCSI_H_
20
21#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
22#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
23
24#define VNIC_FNIC_WQ_DESCS_MIN 64
25#define VNIC_FNIC_WQ_DESCS_MAX 128
26
27#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
28#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
29
30#define VNIC_FNIC_RQ_DESCS_MIN 64
31#define VNIC_FNIC_RQ_DESCS_MAX 128
32
33#define VNIC_FNIC_EDTOV_MIN 1000
34#define VNIC_FNIC_EDTOV_MAX 255000
35#define VNIC_FNIC_EDTOV_DEF 2000
36
37#define VNIC_FNIC_RATOV_MIN 1000
38#define VNIC_FNIC_RATOV_MAX 255000
39
40#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
41#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
42
43#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
44#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
45#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
46
47#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
48#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
49
50#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
51#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
52#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
53
54#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
55#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
56
57#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
58#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
59
60#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
61#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
62
63#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
64#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
65
66#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
67#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
68
69#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
70#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
71
72/* Device-specific region: scsi configuration */
73struct vnic_fc_config {
74 u64 node_wwn;
75 u64 port_wwn;
76 u32 flags;
77 u32 wq_enet_desc_count;
78 u32 wq_copy_desc_count;
79 u32 rq_desc_count;
80 u32 flogi_retries;
81 u32 flogi_timeout;
82 u32 plogi_retries;
83 u32 plogi_timeout;
84 u32 io_throttle_count;
85 u32 link_down_timeout;
86 u32 port_down_timeout;
87 u32 port_down_io_retries;
88 u32 luns_per_tgt;
89 u16 maxdatafieldsize;
90 u16 ed_tov;
91 u16 ra_tov;
92 u16 intr_timer;
93 u8 intr_timer_type;
94};
95
96#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
97#define VFCF_PERBI 0x2 /* persistent binding info available */
98
99#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 00000000000..5372e23c1cb
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_STATS_H_
19#define _VNIC_STATS_H_
20
21/* Tx statistics */
22struct vnic_tx_stats {
23 u64 tx_frames_ok;
24 u64 tx_unicast_frames_ok;
25 u64 tx_multicast_frames_ok;
26 u64 tx_broadcast_frames_ok;
27 u64 tx_bytes_ok;
28 u64 tx_unicast_bytes_ok;
29 u64 tx_multicast_bytes_ok;
30 u64 tx_broadcast_bytes_ok;
31 u64 tx_drops;
32 u64 tx_errors;
33 u64 tx_tso;
34 u64 rsvd[16];
35};
36
37/* Rx statistics */
38struct vnic_rx_stats {
39 u64 rx_frames_ok;
40 u64 rx_frames_total;
41 u64 rx_unicast_frames_ok;
42 u64 rx_multicast_frames_ok;
43 u64 rx_broadcast_frames_ok;
44 u64 rx_bytes_ok;
45 u64 rx_unicast_bytes_ok;
46 u64 rx_multicast_bytes_ok;
47 u64 rx_broadcast_bytes_ok;
48 u64 rx_drop;
49 u64 rx_no_bufs;
50 u64 rx_errors;
51 u64 rx_rss;
52 u64 rx_crc_errors;
53 u64 rx_frames_64;
54 u64 rx_frames_127;
55 u64 rx_frames_255;
56 u64 rx_frames_511;
57 u64 rx_frames_1023;
58 u64 rx_frames_1518;
59 u64 rx_frames_to_max;
60 u64 rsvd[16];
61};
62
63struct vnic_stats {
64 struct vnic_tx_stats tx;
65 struct vnic_rx_stats rx;
66};
67
68#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 00000000000..1f9ea790d13
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_wq.h"
25
26static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
27{
28 struct vnic_wq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = wq->ring.desc_count;
31 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
32
33 vdev = wq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!wq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc wq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = wq->bufs[i];
45 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)wq->ring.descs +
48 wq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = wq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
53 buf->next = wq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 wq->to_use = wq->to_clean = wq->bufs[0];
62
63 return 0;
64}
65
66void vnic_wq_free(struct vnic_wq *wq)
67{
68 struct vnic_dev *vdev;
69 unsigned int i;
70
71 vdev = wq->vdev;
72
73 vnic_dev_free_desc_ring(vdev, &wq->ring);
74
75 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
76 kfree(wq->bufs[i]);
77 wq->bufs[i] = NULL;
78 }
79
80 wq->ctrl = NULL;
81
82}
83
84int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 wq->index = index;
90 wq->vdev = vdev;
91
92 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
93 if (!wq->ctrl) {
94 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_wq_disable(wq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_wq_alloc_bufs(wq);
105 if (err) {
106 vnic_wq_free(wq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118
119 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
120 writeq(paddr, &wq->ctrl->ring_base);
121 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
122 iowrite32(0, &wq->ctrl->fetch_index);
123 iowrite32(0, &wq->ctrl->posted_index);
124 iowrite32(cq_index, &wq->ctrl->cq_index);
125 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
126 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
127 iowrite32(0, &wq->ctrl->error_status);
128}
129
130unsigned int vnic_wq_error_status(struct vnic_wq *wq)
131{
132 return ioread32(&wq->ctrl->error_status);
133}
134
135void vnic_wq_enable(struct vnic_wq *wq)
136{
137 iowrite32(1, &wq->ctrl->enable);
138}
139
140int vnic_wq_disable(struct vnic_wq *wq)
141{
142 unsigned int wait;
143
144 iowrite32(0, &wq->ctrl->enable);
145
146 /* Wait for HW to ACK disable request */
147 for (wait = 0; wait < 100; wait++) {
148 if (!(ioread32(&wq->ctrl->running)))
149 return 0;
150 udelay(1);
151 }
152
153 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
154
155 return -ETIMEDOUT;
156}
157
158void vnic_wq_clean(struct vnic_wq *wq,
159 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
160{
161 struct vnic_wq_buf *buf;
162
163 BUG_ON(ioread32(&wq->ctrl->enable));
164
165 buf = wq->to_clean;
166
167 while (vnic_wq_desc_used(wq) > 0) {
168
169 (*buf_clean)(wq, buf);
170
171 buf = wq->to_clean = buf->next;
172 wq->ring.desc_avail++;
173 }
174
175 wq->to_use = wq->to_clean = wq->bufs[0];
176
177 iowrite32(0, &wq->ctrl->fetch_index);
178 iowrite32(0, &wq->ctrl->posted_index);
179 iowrite32(0, &wq->ctrl->error_status);
180
181 vnic_dev_clear_desc_ring(&wq->ring);
182}
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 00000000000..5cd094f7928
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_H_
19#define _VNIC_WQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
27 * Driver) when both are built with CONFIG options =y
28 */
29#define vnic_wq_desc_avail fnic_wq_desc_avail
30#define vnic_wq_desc_used fnic_wq_desc_used
31#define vnic_wq_next_desc fnic_wq_next_desc
32#define vnic_wq_post fnic_wq_post
33#define vnic_wq_service fnic_wq_service
34#define vnic_wq_free fnic_wq_free
35#define vnic_wq_alloc fnic_wq_alloc
36#define vnic_wq_init fnic_wq_init
37#define vnic_wq_error_status fnic_wq_error_status
38#define vnic_wq_enable fnic_wq_enable
39#define vnic_wq_disable fnic_wq_disable
40#define vnic_wq_clean fnic_wq_clean
41
42/* Work queue control */
43struct vnic_wq_ctrl {
44 u64 ring_base; /* 0x00 */
45 u32 ring_size; /* 0x08 */
46 u32 pad0;
47 u32 posted_index; /* 0x10 */
48 u32 pad1;
49 u32 cq_index; /* 0x18 */
50 u32 pad2;
51 u32 enable; /* 0x20 */
52 u32 pad3;
53 u32 running; /* 0x28 */
54 u32 pad4;
55 u32 fetch_index; /* 0x30 */
56 u32 pad5;
57 u32 dca_value; /* 0x38 */
58 u32 pad6;
59 u32 error_interrupt_enable; /* 0x40 */
60 u32 pad7;
61 u32 error_interrupt_offset; /* 0x48 */
62 u32 pad8;
63 u32 error_status; /* 0x50 */
64 u32 pad9;
65};
66
67struct vnic_wq_buf {
68 struct vnic_wq_buf *next;
69 dma_addr_t dma_addr;
70 void *os_buf;
71 unsigned int len;
72 unsigned int index;
73 int sop;
74 void *desc;
75};
76
77/* Break the vnic_wq_buf allocations into blocks of 64 entries */
78#define VNIC_WQ_BUF_BLK_ENTRIES 64
79#define VNIC_WQ_BUF_BLK_SZ \
80 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
81#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
82 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
83#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
84
85struct vnic_wq {
86 unsigned int index;
87 struct vnic_dev *vdev;
88 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
89 struct vnic_dev_ring ring;
90 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
91 struct vnic_wq_buf *to_use;
92 struct vnic_wq_buf *to_clean;
93 unsigned int pkts_outstanding;
94};
95
96static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
97{
98 /* how many does SW own? */
99 return wq->ring.desc_avail;
100}
101
102static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
103{
104 /* how many does HW own? */
105 return wq->ring.desc_count - wq->ring.desc_avail - 1;
106}
107
108static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
109{
110 return wq->to_use->desc;
111}
112
113static inline void vnic_wq_post(struct vnic_wq *wq,
114 void *os_buf, dma_addr_t dma_addr,
115 unsigned int len, int sop, int eop)
116{
117 struct vnic_wq_buf *buf = wq->to_use;
118
119 buf->sop = sop;
120 buf->os_buf = eop ? os_buf : NULL;
121 buf->dma_addr = dma_addr;
122 buf->len = len;
123
124 buf = buf->next;
125 if (eop) {
126 /* Adding write memory barrier prevents compiler and/or CPU
127 * reordering, thus avoiding descriptor posting before
128 * descriptor is initialized. Otherwise, hardware can read
129 * stale descriptor fields.
130 */
131 wmb();
132 iowrite32(buf->index, &wq->ctrl->posted_index);
133 }
134 wq->to_use = buf;
135
136 wq->ring.desc_avail--;
137}
138
139static inline void vnic_wq_service(struct vnic_wq *wq,
140 struct cq_desc *cq_desc, u16 completed_index,
141 void (*buf_service)(struct vnic_wq *wq,
142 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
143 void *opaque)
144{
145 struct vnic_wq_buf *buf;
146
147 buf = wq->to_clean;
148 while (1) {
149
150 (*buf_service)(wq, cq_desc, buf, opaque);
151
152 wq->ring.desc_avail++;
153
154 wq->to_clean = buf->next;
155
156 if (buf->index == completed_index)
157 break;
158
159 buf = wq->to_clean;
160 }
161}
162
163void vnic_wq_free(struct vnic_wq *wq);
164int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
165 unsigned int desc_count, unsigned int desc_size);
166void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
167 unsigned int error_interrupt_enable,
168 unsigned int error_interrupt_offset);
169unsigned int vnic_wq_error_status(struct vnic_wq *wq);
170void vnic_wq_enable(struct vnic_wq *wq);
171int vnic_wq_disable(struct vnic_wq *wq);
172void vnic_wq_clean(struct vnic_wq *wq,
173 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
174
175#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 00000000000..9eab7e7caf3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_wq_copy.h"
24
25void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
26{
27 iowrite32(1, &wq->ctrl->enable);
28}
29
30int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
31{
32 unsigned int wait;
33
34 iowrite32(0, &wq->ctrl->enable);
35
36 /* Wait for HW to ACK disable request */
37 for (wait = 0; wait < 100; wait++) {
38 if (!(ioread32(&wq->ctrl->running)))
39 return 0;
40 udelay(1);
41 }
42
43 printk(KERN_ERR "Failed to disable Copy WQ[%d],"
44 " fetch index=%d, posted_index=%d\n",
45 wq->index, ioread32(&wq->ctrl->fetch_index),
46 ioread32(&wq->ctrl->posted_index));
47
48 return -ENODEV;
49}
50
51void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
52 void (*q_clean)(struct vnic_wq_copy *wq,
53 struct fcpio_host_req *wq_desc))
54{
55 BUG_ON(ioread32(&wq->ctrl->enable));
56
57 if (vnic_wq_copy_desc_in_use(wq))
58 vnic_wq_copy_service(wq, -1, q_clean);
59
60 wq->to_use_index = wq->to_clean_index = 0;
61
62 iowrite32(0, &wq->ctrl->fetch_index);
63 iowrite32(0, &wq->ctrl->posted_index);
64 iowrite32(0, &wq->ctrl->error_status);
65
66 vnic_dev_clear_desc_ring(&wq->ring);
67}
68
69void vnic_wq_copy_free(struct vnic_wq_copy *wq)
70{
71 struct vnic_dev *vdev;
72
73 vdev = wq->vdev;
74 vnic_dev_free_desc_ring(vdev, &wq->ring);
75 wq->ctrl = NULL;
76}
77
78int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
79 unsigned int index, unsigned int desc_count,
80 unsigned int desc_size)
81{
82 int err;
83
84 wq->index = index;
85 wq->vdev = vdev;
86 wq->to_use_index = wq->to_clean_index = 0;
87 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
88 if (!wq->ctrl) {
89 printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
90 return -EINVAL;
91 }
92
93 vnic_wq_copy_disable(wq);
94
95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
96 if (err)
97 return err;
98
99 return 0;
100}
101
102void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
103 unsigned int error_interrupt_enable,
104 unsigned int error_interrupt_offset)
105{
106 u64 paddr;
107
108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
109 writeq(paddr, &wq->ctrl->ring_base);
110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
111 iowrite32(0, &wq->ctrl->fetch_index);
112 iowrite32(0, &wq->ctrl->posted_index);
113 iowrite32(cq_index, &wq->ctrl->cq_index);
114 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
115 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
116}
117
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 00000000000..6aff9740c3d
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_COPY_H_
19#define _VNIC_WQ_COPY_H_
20
21#include <linux/pci.h>
22#include "vnic_wq.h"
23#include "fcpio.h"
24
25#define VNIC_WQ_COPY_MAX 1
26
27struct vnic_wq_copy {
28 unsigned int index;
29 struct vnic_dev *vdev;
30 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
31 struct vnic_dev_ring ring;
32 unsigned to_use_index;
33 unsigned to_clean_index;
34};
35
36static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
37{
38 return wq->ring.desc_avail;
39}
40
41static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
42{
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail;
44}
45
46static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
47{
48 struct fcpio_host_req *desc = wq->ring.descs;
49 return &desc[wq->to_use_index];
50}
51
52static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
53{
54
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ?
56 (wq->to_use_index = 0) : (wq->to_use_index++);
57 wq->ring.desc_avail--;
58
59 /* Adding write memory barrier prevents compiler and/or CPU
60 * reordering, thus avoiding descriptor posting before
61 * descriptor is initialized. Otherwise, hardware can read
62 * stale descriptor fields.
63 */
64 wmb();
65
66 iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
67}
68
69static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
70{
71 unsigned int cnt;
72
73 if (wq->to_clean_index <= index)
74 cnt = (index - wq->to_clean_index) + 1;
75 else
76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
77
78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
79 wq->ring.desc_avail += cnt;
80
81}
82
83static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
84 u16 completed_index,
85 void (*q_service)(struct vnic_wq_copy *wq,
86 struct fcpio_host_req *wq_desc))
87{
88 struct fcpio_host_req *wq_desc = wq->ring.descs;
89 unsigned int curr_index;
90
91 while (1) {
92
93 if (q_service)
94 (*q_service)(wq, &wq_desc[wq->to_clean_index]);
95
96 wq->ring.desc_avail++;
97
98 curr_index = wq->to_clean_index;
99
100 /* increment the to-clean index so that we start
101 * with an unprocessed index next time we enter the loop
102 */
103 ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
104 (wq->to_clean_index = 0) : (wq->to_clean_index++);
105
106 if (curr_index == completed_index)
107 break;
108
109 /* we have cleaned all the entries */
110 if ((completed_index == (u16)-1) &&
111 (wq->to_clean_index == wq->to_use_index))
112 break;
113 }
114}
115
116void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
117int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
118void vnic_wq_copy_free(struct vnic_wq_copy *wq);
119int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
120 unsigned int index, unsigned int desc_count, unsigned int desc_size);
121void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
122 unsigned int error_interrupt_enable,
123 unsigned int error_interrupt_offset);
124void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
125 void (*q_clean)(struct vnic_wq_copy *wq,
126 struct fcpio_host_req *wq_desc));
127
128#endif /* _VNIC_WQ_COPY_H_ */
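
Unlike the enet queues, the copy WQ above keeps plain to_use_index/to_clean_index counters instead of buffer pointers, so vnic_wq_copy_desc_process() has to count completed descriptors modulo the ring size when the reported completion index has wrapped below to_clean_index. A small stand-alone sketch of that wrap-aware count, with hypothetical numbers (not part of this patch):

#include <stdio.h>

/* Mirror of the count computed in vnic_wq_copy_desc_process() */
static unsigned int descs_completed(unsigned int to_clean, unsigned int index,
                                    unsigned int desc_count)
{
    if (to_clean <= index)
        return (index - to_clean) + 1;
    /* completion index wrapped past the end of the ring */
    return desc_count - to_clean + index + 1;
}

int main(void)
{
    /* e.g. a 128-entry ring, cleaning from entry 120, completion reported at 5 */
    printf("%u\n", descs_completed(120, 5, 128));   /* prints 14: entries 120..127 and 0..5 */
    return 0;
}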
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 00000000000..b121cbad18b
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _WQ_ENET_DESC_H_
19#define _WQ_ENET_DESC_H_
20
21/* Ethernet work queue descriptor: 16B */
22struct wq_enet_desc {
23 __le64 address;
24 __le16 length;
25 __le16 mss_loopback;
26 __le16 header_length_flags;
27 __le16 vlan_tag;
28};
29
30#define WQ_ENET_ADDR_BITS 64
31#define WQ_ENET_LEN_BITS 14
32#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
33#define WQ_ENET_MSS_BITS 14
34#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
35#define WQ_ENET_MSS_SHIFT 2
36#define WQ_ENET_LOOPBACK_SHIFT 1
37#define WQ_ENET_HDRLEN_BITS 10
38#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
39#define WQ_ENET_FLAGS_OM_BITS 2
40#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
41#define WQ_ENET_FLAGS_EOP_SHIFT 12
42#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
43#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
44#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
45
46#define WQ_ENET_OFFLOAD_MODE_CSUM 0
47#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
48#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
49#define WQ_ENET_OFFLOAD_MODE_TSO 3
50
51static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
52 u64 address, u16 length, u16 mss, u16 header_length,
53 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
54 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
55{
56 desc->address = cpu_to_le64(address);
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
59 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
60 desc->header_length_flags = cpu_to_le16(
61 (header_length & WQ_ENET_HDRLEN_MASK) |
62 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
63 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
64 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
65 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
66 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
67 desc->vlan_tag = cpu_to_le16(vlan_tag);
68}
69
70static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
71 u64 *address, u16 *length, u16 *mss, u16 *header_length,
72 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
73 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
74{
75 *address = le64_to_cpu(desc->address);
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
78 WQ_ENET_MSS_MASK;
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
80 WQ_ENET_LOOPBACK_SHIFT) & 1);
81 *header_length = le16_to_cpu(desc->header_length_flags) &
82 WQ_ENET_HDRLEN_MASK;
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
84 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
93 *vlan_tag = le16_to_cpu(desc->vlan_tag);
94}
95
96#endif /* _WQ_ENET_DESC_H_ */
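
The 16-byte descriptor above packs the header length (10 bits), offload mode (2 bits) and the EOP / CQ-entry / FCoE-encap / VLAN-insert flags into the single little-endian header_length_flags word; wq_enet_desc_dec() just reverses the shifts and masks. A stand-alone sketch of that one word with illustrative values (host byte order, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Field layout of wq_enet_desc.header_length_flags (see the masks/shifts above) */
#define HDRLEN_BITS    10
#define HDRLEN_MASK    ((1u << HDRLEN_BITS) - 1)
#define OM_MASK        0x3u
#define EOP_SHIFT      12
#define CQ_ENTRY_SHIFT 13
#define FCOE_SHIFT     14

int main(void)
{
    /* 54-byte header, TSO offload (mode 3), EOP and CQ-entry set, no FCoE encap */
    uint16_t w = (54 & HDRLEN_MASK) |
                 ((3 & OM_MASK) << HDRLEN_BITS) |
                 (1u << EOP_SHIFT) |
                 (1u << CQ_ENTRY_SHIFT);

    printf("header_length=%u offload_mode=%u eop=%u fcoe_encap=%u\n",
           w & HDRLEN_MASK, (w >> HDRLEN_BITS) & OM_MASK,
           (w >> EOP_SHIFT) & 1u, (w >> FCOE_SHIFT) & 1u);
    return 0;
}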
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e1..1258da34fbc 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
     struct Scsi_Host *host, gdth_ha_str *ha)
 {
     int size = 0,len = 0;
+    int hlen;
     off_t begin = 0,pos = 0;
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
     if (reserve_list[0] == 0xff)
         strcpy(hrec, "--");
     else {
-        sprintf(hrec, "%d", reserve_list[0]);
+        hlen = sprintf(hrec, "%d", reserve_list[0]);
         for (i = 1; i < MAX_RES_ARGS; i++) {
             if (reserve_list[i] == 0xff)
                 break;
-            sprintf(hrec,"%s,%d", hrec, reserve_list[i]);
+            hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
         }
     }
     size = sprintf(buffer+len,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a..b4b805e8d7d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
     { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
     { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
     { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
-    { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+    { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
     { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
 
     { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 
 static const char *unknown_error = "unknown error";
 
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
     int fc_rsp_len = rsp->fcp_rsp_len;
 
     if ((rsp->flags & FCP_RSP_LEN_VALID) &&
-        ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+        ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
          rsp->data.info.rsp_code))
         return DID_ERROR << 16;
 
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
     case IBMVFC_TGT_ACTION_DEL_RPORT:
         break;
     default:
+        if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+            tgt->add_rport = 0;
         tgt->action = action;
         break;
     }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
         if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
             vhost->action = action;
         break;
+    case IBMVFC_HOST_ACTION_LOGO_WAIT:
+        if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
+            vhost->action = action;
+        break;
     case IBMVFC_HOST_ACTION_INIT_WAIT:
         if (vhost->action == IBMVFC_HOST_ACTION_INIT)
             vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
         switch (vhost->action) {
         case IBMVFC_HOST_ACTION_INIT_WAIT:
         case IBMVFC_HOST_ACTION_NONE:
-        case IBMVFC_HOST_ACTION_TGT_ADD:
+        case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
             vhost->action = action;
             break;
         default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
         if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
             vhost->action = action;
         break;
+    case IBMVFC_HOST_ACTION_LOGO:
     case IBMVFC_HOST_ACTION_INIT:
     case IBMVFC_HOST_ACTION_TGT_DEL:
     case IBMVFC_HOST_ACTION_QUERY_TGTS:
     case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
-    case IBMVFC_HOST_ACTION_TGT_ADD:
     case IBMVFC_HOST_ACTION_NONE:
     default:
         vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
     }
 
     list_for_each_entry(tgt, &vhost->targets, queue)
-        tgt->need_login = 1;
+        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
     scsi_block_requests(vhost->host);
     ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
     vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
     } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
     vhost->state = IBMVFC_NO_CRQ;
+    vhost->logged_in = 0;
     dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
     free_page((unsigned long)crq->msgs);
 }
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
     } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
     vhost->state = IBMVFC_NO_CRQ;
+    vhost->logged_in = 0;
     ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 
     /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
 }
 
 /**
- * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
  * @vhost: struct ibmvfc host to reset
  **/
-static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
 {
     int rc;
 
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
 }
 
 /**
- * ibmvfc_reset_host - Reset the connection to the server
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
  * @vhost: struct ibmvfc host to reset
  **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+    if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
+        !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+        scsi_block_requests(vhost->host);
+        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
+        vhost->job_step = ibmvfc_npiv_logout;
+        wake_up(&vhost->work_wait_q);
+    } else
+        ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost: ibmvfc host struct
+ **/
 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
 {
     unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
  * ibmvfc_retry_host_init - Retry host initialization if allowed
  * @vhost: ibmvfc host struct
  *
+ * Returns: 1 if init will be retried / 0 if not
+ *
  **/
-static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 {
+    int retry = 0;
+
     if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
         vhost->delay_init = 1;
         if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
             ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
         } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
             __ibmvfc_reset_host(vhost);
-        else
+        else {
             ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+            retry = 1;
+        }
     }
 
     wake_up(&vhost->work_wait_q);
+    return retry;
 }
 
 /**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 login_info->partition_num = vhost->partition_number; 1169 login_info->partition_num = vhost->partition_number;
1138 login_info->vfc_frame_version = 1; 1170 login_info->vfc_frame_version = 1;
1139 login_info->fcp_version = 3; 1171 login_info->fcp_version = 3;
1172 login_info->flags = IBMVFC_FLUSH_ON_HALT;
1140 if (vhost->client_migrated) 1173 if (vhost->client_migrated)
1141 login_info->flags = IBMVFC_CLIENT_MIGRATED; 1174 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1142 1175
1143 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1176 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1144 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1177 login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452} 1485}
1453 1486
1454/** 1487/**
1488 * ibmvfc_relogin - Log back into the specified device
1489 * @sdev: scsi device struct
1490 *
1491 **/
1492static void ibmvfc_relogin(struct scsi_device *sdev)
1493{
1494 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1496 struct ibmvfc_target *tgt;
1497
1498 list_for_each_entry(tgt, &vhost->targets, queue) {
1499 if (rport == tgt->rport) {
1500 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1501 break;
1502 }
1503 }
1504
1505 ibmvfc_reinit_host(vhost);
1506}
1507
1508/**
1455 * ibmvfc_scsi_done - Handle responses from commands 1509 * ibmvfc_scsi_done - Handle responses from commands
1456 * @evt: ibmvfc event to be handled 1510 * @evt: ibmvfc event to be handled
1457 * 1511 *
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1537 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1484 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1538 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1485 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) 1539 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1486 ibmvfc_reinit_host(evt->vhost); 1540 ibmvfc_relogin(cmnd->device);
1487 1541
1488 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) 1542 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1489 cmnd->result = (DID_ERROR << 16); 1543 cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 struct ibmvfc_host *vhost) 2202 struct ibmvfc_host *vhost)
2149{ 2203{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2204 const char *desc = ibmvfc_get_ae_desc(crq->event);
2205 struct ibmvfc_target *tgt;
2151 2206
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2207 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2208 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2209
2155 switch (crq->event) { 2210 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP:
2157 case IBMVFC_AE_RESUME: 2211 case IBMVFC_AE_RESUME:
2212 switch (crq->link_state) {
2213 case IBMVFC_AE_LS_LINK_DOWN:
2214 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2215 break;
2216 case IBMVFC_AE_LS_LINK_DEAD:
2217 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2218 break;
2219 case IBMVFC_AE_LS_LINK_UP:
2220 case IBMVFC_AE_LS_LINK_BOUNCED:
2221 default:
2222 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2223 vhost->delay_init = 1;
2224 __ibmvfc_reset_host(vhost);
2225 break;
2226 };
2227
2228 break;
2229 case IBMVFC_AE_LINK_UP:
2158 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2230 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2159 vhost->delay_init = 1; 2231 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost); 2232 __ibmvfc_reset_host(vhost);
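(Editor's cross-reference.) The link_state byte that the new IBMVFC_AE_RESUME handling switches on is introduced by the ibmvfc.h hunks later in this diff, where one of the old pad bytes of the async CRQ becomes a link-state field:

/*
 * struct ibmvfc_async_crq:  u8 valid; u8 link_state; u8 pad[2]; ...
 * enum ibmvfc_ae_link_state:
 *   IBMVFC_AE_LS_LINK_UP      0x01
 *   IBMVFC_AE_LS_LINK_BOUNCED 0x02
 *   IBMVFC_AE_LS_LINK_DOWN    0x04
 *   IBMVFC_AE_LS_LINK_DEAD    0x08
 * Down/dead only mark the link down; up, bounced and unknown values fall
 * through to the full host reset.
 */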
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 case IBMVFC_AE_SCN_NPORT: 2240 case IBMVFC_AE_SCN_NPORT:
2169 case IBMVFC_AE_SCN_GROUP: 2241 case IBMVFC_AE_SCN_GROUP:
2170 vhost->events_to_log |= IBMVFC_AE_RSCN; 2242 vhost->events_to_log |= IBMVFC_AE_RSCN;
2243 ibmvfc_reinit_host(vhost);
2244 break;
2171 case IBMVFC_AE_ELS_LOGO: 2245 case IBMVFC_AE_ELS_LOGO:
2172 case IBMVFC_AE_ELS_PRLO: 2246 case IBMVFC_AE_ELS_PRLO:
2173 case IBMVFC_AE_ELS_PLOGI: 2247 case IBMVFC_AE_ELS_PLOGI:
2248 list_for_each_entry(tgt, &vhost->targets, queue) {
2249 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2250 break;
2251 if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2252 continue;
2253 if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2254 continue;
2255 if (crq->node_name && tgt->ids.node_name != crq->node_name)
2256 continue;
2257 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2258 }
2259
2174 ibmvfc_reinit_host(vhost); 2260 ibmvfc_reinit_host(vhost);
2175 break; 2261 break;
2176 case IBMVFC_AE_LINK_DOWN: 2262 case IBMVFC_AE_LINK_DOWN:
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 return; 2308 return;
2223 case IBMVFC_CRQ_XPORT_EVENT: 2309 case IBMVFC_CRQ_XPORT_EVENT:
2224 vhost->state = IBMVFC_NO_CRQ; 2310 vhost->state = IBMVFC_NO_CRQ;
2311 vhost->logged_in = 0;
2225 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2312 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2226 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2313 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2227 /* We need to re-setup the interpartition connection */ 2314 /* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 done = 1; 2386 done = 1;
2300 } 2387 }
2301 2388
2302 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) 2389 if (vhost->scan_complete)
2303 done = 1; 2390 done = 1;
2304 spin_unlock_irqrestore(shost->host_lock, flags); 2391 spin_unlock_irqrestore(shost->host_lock, flags);
2305 return done; 2392 return done;
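(Editor's cross-reference.) vhost->scan_complete is a new field (see the ibmvfc.h hunk below) and is set only by the rport-add worker introduced later in this patch, so the initial SCSI scan now waits until every discovered rport has actually been registered:

/*
 * .scan_finished() is polled by the SCSI midlayer during the asynchronous
 * host scan; returning nonzero ends the scan.  scan_complete replaces the
 * old "CRQ up and no host action pending" heuristic with an explicit flag.
 */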
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 vhost->login_buf->resp.partition_name); 2521 vhost->login_buf->resp.partition_name);
2435} 2522}
2436 2523
2437static struct device_attribute ibmvfc_host_partition_name = {
2438 .attr = {
2439 .name = "partition_name",
2440 .mode = S_IRUGO,
2441 },
2442 .show = ibmvfc_show_host_partition_name,
2443};
2444
2445static ssize_t ibmvfc_show_host_device_name(struct device *dev, 2524static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2446 struct device_attribute *attr, char *buf) 2525 struct device_attribute *attr, char *buf)
2447{ 2526{
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 vhost->login_buf->resp.device_name); 2531 vhost->login_buf->resp.device_name);
2453} 2532}
2454 2533
2455static struct device_attribute ibmvfc_host_device_name = {
2456 .attr = {
2457 .name = "device_name",
2458 .mode = S_IRUGO,
2459 },
2460 .show = ibmvfc_show_host_device_name,
2461};
2462
2463static ssize_t ibmvfc_show_host_loc_code(struct device *dev, 2534static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2464 struct device_attribute *attr, char *buf) 2535 struct device_attribute *attr, char *buf)
2465{ 2536{
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 vhost->login_buf->resp.port_loc_code); 2541 vhost->login_buf->resp.port_loc_code);
2471} 2542}
2472 2543
2473static struct device_attribute ibmvfc_host_loc_code = {
2474 .attr = {
2475 .name = "port_loc_code",
2476 .mode = S_IRUGO,
2477 },
2478 .show = ibmvfc_show_host_loc_code,
2479};
2480
2481static ssize_t ibmvfc_show_host_drc_name(struct device *dev, 2544static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2482 struct device_attribute *attr, char *buf) 2545 struct device_attribute *attr, char *buf)
2483{ 2546{
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 vhost->login_buf->resp.drc_name); 2551 vhost->login_buf->resp.drc_name);
2489} 2552}
2490 2553
2491static struct device_attribute ibmvfc_host_drc_name = {
2492 .attr = {
2493 .name = "drc_name",
2494 .mode = S_IRUGO,
2495 },
2496 .show = ibmvfc_show_host_drc_name,
2497};
2498
2499static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, 2554static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2500 struct device_attribute *attr, char *buf) 2555 struct device_attribute *attr, char *buf)
2501{ 2556{
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); 2559 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2505} 2560}
2506 2561
2507static struct device_attribute ibmvfc_host_npiv_version = { 2562static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2508 .attr = { 2563 struct device_attribute *attr, char *buf)
2509 .name = "npiv_version", 2564{
2510 .mode = S_IRUGO, 2565 struct Scsi_Host *shost = class_to_shost(dev);
2511 }, 2566 struct ibmvfc_host *vhost = shost_priv(shost);
2512 .show = ibmvfc_show_host_npiv_version, 2567 return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2513}; 2568}
2514 2569
2515/** 2570/**
2516 * ibmvfc_show_log_level - Show the adapter's error logging level 2571 * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 return strlen(buf); 2611 return strlen(buf);
2557} 2612}
2558 2613
2559static struct device_attribute ibmvfc_log_level_attr = { 2614static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2560 .attr = { 2615static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2561 .name = "log_level", 2616static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2562 .mode = S_IRUGO | S_IWUSR, 2617static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2563 }, 2618static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2564 .show = ibmvfc_show_log_level, 2619static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2565 .store = ibmvfc_store_log_level 2620static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2566}; 2621 ibmvfc_show_log_level, ibmvfc_store_log_level);
2567 2622
2568#ifdef CONFIG_SCSI_IBMVFC_TRACE 2623#ifdef CONFIG_SCSI_IBMVFC_TRACE
2569/** 2624/**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612#endif 2667#endif
2613 2668
2614static struct device_attribute *ibmvfc_attrs[] = { 2669static struct device_attribute *ibmvfc_attrs[] = {
2615 &ibmvfc_host_partition_name, 2670 &dev_attr_partition_name,
2616 &ibmvfc_host_device_name, 2671 &dev_attr_device_name,
2617 &ibmvfc_host_loc_code, 2672 &dev_attr_port_loc_code,
2618 &ibmvfc_host_drc_name, 2673 &dev_attr_drc_name,
2619 &ibmvfc_host_npiv_version, 2674 &dev_attr_npiv_version,
2620 &ibmvfc_log_level_attr, 2675 &dev_attr_capabilities,
2676 &dev_attr_log_level,
2621 NULL 2677 NULL
2622}; 2678};
2623 2679
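(Editor's note on the sysfs conversion in the two hunks above; a sketch of standard kernel behaviour, not patch text.) DEVICE_ATTR(name, mode, show, store) from <linux/device.h> declares a struct device_attribute named dev_attr_<name>, which is why the hand-rolled attribute structs are dropped and ibmvfc_attrs[] now takes &dev_attr_* pointers. For the new capabilities attribute the macro expands to approximately:

static struct device_attribute dev_attr_capabilities = {
	.attr	= { .name = "capabilities", .mode = S_IRUGO },
	.show	= ibmvfc_show_host_capabilities,
	.store	= NULL,
};

So the conversion is mechanical; the only genuinely new host attribute is capabilities.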
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 * @tgt: ibmvfc target struct 2830 * @tgt: ibmvfc target struct
2775 * @job_step: initialization job step 2831 * @job_step: initialization job step
2776 * 2832 *
2833 * Returns: 1 if step will be retried / 0 if not
2834 *
2777 **/ 2835 **/
2778static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2836static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2779 void (*job_step) (struct ibmvfc_target *)) 2837 void (*job_step) (struct ibmvfc_target *))
2780{ 2838{
2781 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { 2839 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2782 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2840 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2783 wake_up(&tgt->vhost->work_wait_q); 2841 wake_up(&tgt->vhost->work_wait_q);
2842 return 0;
2784 } else 2843 } else
2785 ibmvfc_init_tgt(tgt, job_step); 2844 ibmvfc_init_tgt(tgt, job_step);
2845 return 1;
2786} 2846}
2787 2847
2788/* Defined in FC-LS */ 2848/* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2891 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2832 struct ibmvfc_prli_svc_parms *parms = &rsp->parms; 2892 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2833 u32 status = rsp->common.status; 2893 u32 status = rsp->common.status;
2834 int index; 2894 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
2835 2895
2836 vhost->discovery_threads--; 2896 vhost->discovery_threads--;
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2897 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; 2910 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2851 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) 2911 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2852 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 2912 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2853 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2913 tgt->add_rport = 1;
2854 } else 2914 } else
2855 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2915 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2856 } else if (prli_rsp[index].retry) 2916 } else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 break; 2927 break;
2868 case IBMVFC_MAD_FAILED: 2928 case IBMVFC_MAD_FAILED:
2869 default: 2929 default:
2870 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2871 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2872 rsp->status, rsp->error, status);
2873 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2930 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2874 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2931 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2875 else 2932 else
2876 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2933 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2934
2935 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2936 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2937 rsp->status, rsp->error, status);
2877 break; 2938 break;
2878 }; 2939 };
2879 2940
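(Editor's note on a pattern this and the following hunks repeat.) ibmvfc_retry_tgt_init() and ibmvfc_retry_host_init() now return 1 when a retry was actually scheduled, and the callers add that to a log level before printing, so failures that will be retried are demoted below the default verbosity. A minimal sketch of the idea, with will_retry and job_step standing in for the real arguments:

	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	if (will_retry)
		level += ibmvfc_retry_tgt_init(tgt, job_step);	/* +1 only when retried */
	else
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);

	/* tgt_log(), added to ibmvfc.h later in this diff, prints only when
	 * vhost->log_level >= level, so retried failures stay quiet by default */
	tgt_log(tgt, level, "login failed\n");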
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 struct ibmvfc_host *vhost = evt->vhost; 2993 struct ibmvfc_host *vhost = evt->vhost;
2933 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; 2994 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2934 u32 status = rsp->common.status; 2995 u32 status = rsp->common.status;
2996 int level = IBMVFC_DEFAULT_LOG_LEVEL;
2935 2997
2936 vhost->discovery_threads--; 2998 vhost->discovery_threads--;
2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2999 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 break; 3022 break;
2961 case IBMVFC_MAD_FAILED: 3023 case IBMVFC_MAD_FAILED:
2962 default: 3024 default:
2963 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2964 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2965 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2966 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2967
2968 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3025 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2969 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 3026 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2970 else 3027 else
2971 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3028 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3029
3030 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3031 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3032 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3033 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2972 break; 3034 break;
2973 }; 3035 };
2974 3036
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 case IBMVFC_MAD_SUCCESS: 3191 case IBMVFC_MAD_SUCCESS:
3130 tgt_dbg(tgt, "ADISC succeeded\n"); 3192 tgt_dbg(tgt, "ADISC succeeded\n");
3131 if (ibmvfc_adisc_needs_plogi(mad, tgt)) 3193 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3132 tgt->need_login = 1; 3194 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3133 break; 3195 break;
3134 case IBMVFC_MAD_DRIVER_FAILED: 3196 case IBMVFC_MAD_DRIVER_FAILED:
3135 break; 3197 break;
3136 case IBMVFC_MAD_FAILED: 3198 case IBMVFC_MAD_FAILED:
3137 default: 3199 default:
3138 tgt->need_login = 1; 3200 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3139 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 3201 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3140 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 3202 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3141 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3203 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 struct ibmvfc_host *vhost = evt->vhost; 3384 struct ibmvfc_host *vhost = evt->vhost;
3323 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; 3385 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3324 u32 status = rsp->common.status; 3386 u32 status = rsp->common.status;
3387 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3325 3388
3326 vhost->discovery_threads--; 3389 vhost->discovery_threads--;
3327 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3390 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 break; 3404 break;
3342 case IBMVFC_MAD_FAILED: 3405 case IBMVFC_MAD_FAILED:
3343 default: 3406 default:
3344 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3345 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3346 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3347 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3348
3349 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && 3407 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3350 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && 3408 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3351 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) 3409 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3352 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3410 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3353 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3411 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3354 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3412 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3355 else 3413 else
3356 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3414 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3415
3416 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3417 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3418 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3419 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3357 break; 3420 break;
3358 }; 3421 };
3359 3422
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 } 3483 }
3421 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3484 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3422 3485
3423 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3486 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3424 if (!tgt) { 3487 if (!tgt) {
3425 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", 3488 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3426 scsi_id); 3489 scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 struct ibmvfc_host *vhost = evt->vhost; 3535 struct ibmvfc_host *vhost = evt->vhost;
3473 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; 3536 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3474 u32 mad_status = rsp->common.status; 3537 u32 mad_status = rsp->common.status;
3538 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3475 3539
3476 switch (mad_status) { 3540 switch (mad_status) {
3477 case IBMVFC_MAD_SUCCESS: 3541 case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); 3544 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3481 break; 3545 break;
3482 case IBMVFC_MAD_FAILED: 3546 case IBMVFC_MAD_FAILED:
3483 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", 3547 level += ibmvfc_retry_host_init(vhost);
3484 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); 3548 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3485 ibmvfc_retry_host_init(vhost); 3549 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3486 break; 3550 break;
3487 case IBMVFC_MAD_DRIVER_FAILED: 3551 case IBMVFC_MAD_DRIVER_FAILED:
3488 break; 3552 break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 u32 mad_status = evt->xfer_iu->npiv_login.common.status; 3598 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3535 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; 3599 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3536 unsigned int npiv_max_sectors; 3600 unsigned int npiv_max_sectors;
3601 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3537 3602
3538 switch (mad_status) { 3603 switch (mad_status) {
3539 case IBMVFC_MAD_SUCCESS: 3604 case IBMVFC_MAD_SUCCESS:
3540 ibmvfc_free_event(evt); 3605 ibmvfc_free_event(evt);
3541 break; 3606 break;
3542 case IBMVFC_MAD_FAILED: 3607 case IBMVFC_MAD_FAILED:
3543 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3544 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3545 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3608 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3546 ibmvfc_retry_host_init(vhost); 3609 level += ibmvfc_retry_host_init(vhost);
3547 else 3610 else
3548 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 3611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3612 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3613 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3549 ibmvfc_free_event(evt); 3614 ibmvfc_free_event(evt);
3550 return; 3615 return;
3551 case IBMVFC_MAD_CRQ_ERROR: 3616 case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 return; 3643 return;
3579 } 3644 }
3580 3645
3646 vhost->logged_in = 1;
3581 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); 3647 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3582 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 3648 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3583 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 3649 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636}; 3702};
3637 3703
3638/** 3704/**
3705 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
3706 * @vhost: ibmvfc host struct
3707 *
3708 **/
3709static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3710{
3711 struct ibmvfc_host *vhost = evt->vhost;
3712 u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
3713
3714 ibmvfc_free_event(evt);
3715
3716 switch (mad_status) {
3717 case IBMVFC_MAD_SUCCESS:
3718 if (list_empty(&vhost->sent) &&
3719 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3720 ibmvfc_init_host(vhost, 0);
3721 return;
3722 }
3723 break;
3724 case IBMVFC_MAD_FAILED:
3725 case IBMVFC_MAD_NOT_SUPPORTED:
3726 case IBMVFC_MAD_CRQ_ERROR:
3727 case IBMVFC_MAD_DRIVER_FAILED:
3728 default:
3729 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
3730 break;
3731 }
3732
3733 ibmvfc_hard_reset_host(vhost);
3734}
3735
3736/**
3737 * ibmvfc_npiv_logout - Issue an NPIV Logout
3738 * @vhost: ibmvfc host struct
3739 *
3740 **/
3741static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
3742{
3743 struct ibmvfc_npiv_logout_mad *mad;
3744 struct ibmvfc_event *evt;
3745
3746 evt = ibmvfc_get_event(vhost);
3747 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
3748
3749 mad = &evt->iu.npiv_logout;
3750 memset(mad, 0, sizeof(*mad));
3751 mad->common.version = 1;
3752 mad->common.opcode = IBMVFC_NPIV_LOGOUT;
3753 mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
3754
3755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
3756
3757 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3758 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
3759 else
3760 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3761}
3762
3763/**
3639 * ibmvfc_dev_init_to_do - Is there target initialization work to do? 3764 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3640 * @vhost: ibmvfc host struct 3765 * @vhost: ibmvfc host struct
3641 * 3766 *
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 switch (vhost->action) { 3796 switch (vhost->action) {
3672 case IBMVFC_HOST_ACTION_NONE: 3797 case IBMVFC_HOST_ACTION_NONE:
3673 case IBMVFC_HOST_ACTION_INIT_WAIT: 3798 case IBMVFC_HOST_ACTION_INIT_WAIT:
3799 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3674 return 0; 3800 return 0;
3675 case IBMVFC_HOST_ACTION_TGT_INIT: 3801 case IBMVFC_HOST_ACTION_TGT_INIT:
3676 case IBMVFC_HOST_ACTION_QUERY_TGTS: 3802 case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) 3809 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3684 return 0; 3810 return 0;
3685 return 1; 3811 return 1;
3812 case IBMVFC_HOST_ACTION_LOGO:
3686 case IBMVFC_HOST_ACTION_INIT: 3813 case IBMVFC_HOST_ACTION_INIT:
3687 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3814 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3688 case IBMVFC_HOST_ACTION_TGT_ADD:
3689 case IBMVFC_HOST_ACTION_TGT_DEL: 3815 case IBMVFC_HOST_ACTION_TGT_DEL:
3690 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 3816 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3691 case IBMVFC_HOST_ACTION_QUERY: 3817 case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) 3866static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3741{ 3867{
3742 struct ibmvfc_host *vhost = tgt->vhost; 3868 struct ibmvfc_host *vhost = tgt->vhost;
3743 struct fc_rport *rport = tgt->rport; 3869 struct fc_rport *rport;
3744 unsigned long flags; 3870 unsigned long flags;
3745 3871
3746 if (rport) { 3872 tgt_dbg(tgt, "Adding rport\n");
3747 tgt_dbg(tgt, "Setting rport roles\n"); 3873 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3748 fc_remote_port_rolechg(rport, tgt->ids.roles); 3874 spin_lock_irqsave(vhost->host->host_lock, flags);
3749 spin_lock_irqsave(vhost->host->host_lock, flags); 3875
3750 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3876 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3877 tgt_dbg(tgt, "Deleting rport\n");
3878 list_del(&tgt->queue);
3751 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3879 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3880 fc_remote_port_delete(rport);
3881 del_timer_sync(&tgt->timer);
3882 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 return; 3883 return;
3753 } 3884 }
3754 3885
3755 tgt_dbg(tgt, "Adding rport\n");
3756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3757 spin_lock_irqsave(vhost->host->host_lock, flags);
3758 tgt->rport = rport;
3759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3760 if (rport) { 3886 if (rport) {
3761 tgt_dbg(tgt, "rport add succeeded\n"); 3887 tgt_dbg(tgt, "rport add succeeded\n");
3888 tgt->rport = rport;
3762 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3889 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3763 rport->supported_classes = 0; 3890 rport->supported_classes = 0;
3764 tgt->target_id = rport->scsi_target_id; 3891 tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 vhost->events_to_log = 0; 3916 vhost->events_to_log = 0;
3790 switch (vhost->action) { 3917 switch (vhost->action) {
3791 case IBMVFC_HOST_ACTION_NONE: 3918 case IBMVFC_HOST_ACTION_NONE:
3919 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3792 case IBMVFC_HOST_ACTION_INIT_WAIT: 3920 case IBMVFC_HOST_ACTION_INIT_WAIT:
3793 break; 3921 break;
3922 case IBMVFC_HOST_ACTION_LOGO:
3923 vhost->job_step(vhost);
3924 break;
3794 case IBMVFC_HOST_ACTION_INIT: 3925 case IBMVFC_HOST_ACTION_INIT:
3795 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3926 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3796 if (vhost->delay_init) { 3927 if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 3967
3837 if (vhost->state == IBMVFC_INITIALIZING) { 3968 if (vhost->state == IBMVFC_INITIALIZING) {
3838 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { 3969 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3839 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3970 if (vhost->reinit) {
3840 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); 3971 vhost->reinit = 0;
3841 vhost->init_retries = 0; 3972 scsi_block_requests(vhost->host);
3842 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3973 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3843 scsi_unblock_requests(vhost->host); 3974 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3975 } else {
3976 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3977 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3978 wake_up(&vhost->init_wait_q);
3979 schedule_work(&vhost->rport_add_work_q);
3980 vhost->init_retries = 0;
3981 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3982 scsi_unblock_requests(vhost->host);
3983 }
3984
3844 return; 3985 return;
3845 } else { 3986 } else {
3846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3987 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 if (!ibmvfc_dev_init_to_do(vhost)) 4012 if (!ibmvfc_dev_init_to_do(vhost))
3872 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); 4013 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3873 break; 4014 break;
3874 case IBMVFC_HOST_ACTION_TGT_ADD:
3875 list_for_each_entry(tgt, &vhost->targets, queue) {
3876 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3877 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3878 ibmvfc_tgt_add_rport(tgt);
3879 return;
3880 }
3881 }
3882
3883 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3884 vhost->reinit = 0;
3885 scsi_block_requests(vhost->host);
3886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3887 } else {
3888 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3889 wake_up(&vhost->init_wait_q);
3890 }
3891 break;
3892 default: 4015 default:
3893 break; 4016 break;
3894 }; 4017 };
@@ -4118,6 +4241,56 @@ nomem:
4118} 4241}
4119 4242
4120/** 4243/**
4244 * ibmvfc_rport_add_thread - Worker thread for rport adds
4245 * @work: work struct
4246 *
4247 **/
4248static void ibmvfc_rport_add_thread(struct work_struct *work)
4249{
4250 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4251 rport_add_work_q);
4252 struct ibmvfc_target *tgt;
4253 struct fc_rport *rport;
4254 unsigned long flags;
4255 int did_work;
4256
4257 ENTER;
4258 spin_lock_irqsave(vhost->host->host_lock, flags);
4259 do {
4260 did_work = 0;
4261 if (vhost->state != IBMVFC_ACTIVE)
4262 break;
4263
4264 list_for_each_entry(tgt, &vhost->targets, queue) {
4265 if (tgt->add_rport) {
4266 did_work = 1;
4267 tgt->add_rport = 0;
4268 kref_get(&tgt->kref);
4269 rport = tgt->rport;
4270 if (!rport) {
4271 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4272 ibmvfc_tgt_add_rport(tgt);
4273 } else if (get_device(&rport->dev)) {
4274 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4275 tgt_dbg(tgt, "Setting rport roles\n");
4276 fc_remote_port_rolechg(rport, tgt->ids.roles);
4277 put_device(&rport->dev);
4278 }
4279
4280 kref_put(&tgt->kref, ibmvfc_release_tgt);
4281 spin_lock_irqsave(vhost->host->host_lock, flags);
4282 break;
4283 }
4284 }
4285 } while(did_work);
4286
4287 if (vhost->state == IBMVFC_ACTIVE)
4288 vhost->scan_complete = 1;
4289 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4290 LEAVE;
4291}
4292
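(Editor's summary of the rport rework this worker completes, not patch text.) The IBMVFC_HOST_ACTION_TGT_ADD state is removed; the pieces now fit together as follows:

/*
 *   ibmvfc_tgt_prli_done()     sets tgt->add_rport = 1
 *   ibmvfc_do_work()           schedules vhost->rport_add_work_q once
 *                              discovery reaches TGT_DEL_FAILED with no
 *                              reinit pending
 *   ibmvfc_rport_add_thread()  takes a kref on each pending target, drops
 *                              the host lock, then either adds a new rport
 *                              via fc_remote_port_add() or role-changes an
 *                              existing one (pinned with get_device())
 *   vhost->scan_complete = 1   finally lets ibmvfc_scan_finished() report
 *                              the initial scan as done
 */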
4293/**
4121 * ibmvfc_probe - Adapter hot plug add entry point 4294 * ibmvfc_probe - Adapter hot plug add entry point
4122 * @vdev: vio device struct 4295 * @vdev: vio device struct
4123 * @id: vio device id struct 4296 * @id: vio device id struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 strcpy(vhost->partition_name, "UNKNOWN"); 4333 strcpy(vhost->partition_name, "UNKNOWN");
4161 init_waitqueue_head(&vhost->work_wait_q); 4334 init_waitqueue_head(&vhost->work_wait_q);
4162 init_waitqueue_head(&vhost->init_wait_q); 4335 init_waitqueue_head(&vhost->init_wait_q);
4336 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4163 4337
4164 if ((rc = ibmvfc_alloc_mem(vhost))) 4338 if ((rc = ibmvfc_alloc_mem(vhost)))
4165 goto free_scsi_host; 4339 goto free_scsi_host;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a756..c2668d7d67f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.5" 32#define IBMVFC_DRIVER_VERSION "1.0.6"
33#define IBMVFC_DRIVER_DATE "(March 19, 2009)" 33#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
57 * Ensure we have resources for ERP and initialization: 57 * Ensure we have resources for ERP and initialization:
58 * 1 for ERP 58 * 1 for ERP
59 * 1 for initialization 59 * 1 for initialization
60 * 1 for NPIV Logout
60 * 2 for each discovery thread 61 * 2 for each discovery thread
61 */ 62 */
62#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) 63#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
63 64
64#define IBMVFC_MAD_SUCCESS 0x00 65#define IBMVFC_MAD_SUCCESS 0x00
65#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 66#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 128 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
128 IBMVFC_PASSTHRU = 0x0200, 129 IBMVFC_PASSTHRU = 0x0200,
129 IBMVFC_TMF_MAD = 0x0100, 130 IBMVFC_TMF_MAD = 0x0100,
131 IBMVFC_NPIV_LOGOUT = 0x0800,
130}; 132};
131 133
132struct ibmvfc_mad_common { 134struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 struct srp_direct_buf buffer; 145 struct srp_direct_buf buffer;
144}__attribute__((packed, aligned (8))); 146}__attribute__((packed, aligned (8)));
145 147
148struct ibmvfc_npiv_logout_mad {
149 struct ibmvfc_mad_common common;
150}__attribute__((packed, aligned (8)));
151
146#define IBMVFC_MAX_NAME 256 152#define IBMVFC_MAX_NAME 256
147 153
148struct ibmvfc_npiv_login { 154struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201#define IBMVFC_NATIVE_FC 0x01 207#define IBMVFC_NATIVE_FC 0x01
202#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 208#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
203 u32 reserved; 209 u32 reserved;
204 u64 capabilites; 210 u64 capabilities;
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
205 u32 max_cmds; 212 u32 max_cmds;
206 u32 scsi_id_sz; 213 u32 scsi_id_sz;
207 u64 max_dma_len; 214 u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 dma_addr_t msg_token; 548 dma_addr_t msg_token;
542}; 549};
543 550
551enum ibmvfc_ae_link_state {
552 IBMVFC_AE_LS_LINK_UP = 0x01,
553 IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
554 IBMVFC_AE_LS_LINK_DOWN = 0x04,
555 IBMVFC_AE_LS_LINK_DEAD = 0x08,
556};
557
544struct ibmvfc_async_crq { 558struct ibmvfc_async_crq {
545 volatile u8 valid; 559 volatile u8 valid;
546 u8 pad[3]; 560 u8 link_state;
561 u8 pad[2];
547 u32 pad2; 562 u32 pad2;
548 volatile u64 event; 563 volatile u64 event;
549 volatile u64 scsi_id; 564 volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
561union ibmvfc_iu { 576union ibmvfc_iu {
562 struct ibmvfc_mad_common mad_common; 577 struct ibmvfc_mad_common mad_common;
563 struct ibmvfc_npiv_login_mad npiv_login; 578 struct ibmvfc_npiv_login_mad npiv_login;
579 struct ibmvfc_npiv_logout_mad npiv_logout;
564 struct ibmvfc_discover_targets discover_targets; 580 struct ibmvfc_discover_targets discover_targets;
565 struct ibmvfc_port_login plogi; 581 struct ibmvfc_port_login plogi;
566 struct ibmvfc_process_login prli; 582 struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
575 IBMVFC_TGT_ACTION_NONE = 0, 591 IBMVFC_TGT_ACTION_NONE = 0,
576 IBMVFC_TGT_ACTION_INIT, 592 IBMVFC_TGT_ACTION_INIT,
577 IBMVFC_TGT_ACTION_INIT_WAIT, 593 IBMVFC_TGT_ACTION_INIT_WAIT,
578 IBMVFC_TGT_ACTION_ADD_RPORT,
579 IBMVFC_TGT_ACTION_DEL_RPORT, 594 IBMVFC_TGT_ACTION_DEL_RPORT,
580}; 595};
581 596
@@ -588,6 +603,7 @@ struct ibmvfc_target {
588 int target_id; 603 int target_id;
589 enum ibmvfc_target_action action; 604 enum ibmvfc_target_action action;
590 int need_login; 605 int need_login;
606 int add_rport;
591 int init_retries; 607 int init_retries;
592 u32 cancel_key; 608 u32 cancel_key;
593 struct ibmvfc_service_parms service_parms; 609 struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
627 643
628enum ibmvfc_host_action { 644enum ibmvfc_host_action {
629 IBMVFC_HOST_ACTION_NONE = 0, 645 IBMVFC_HOST_ACTION_NONE = 0,
646 IBMVFC_HOST_ACTION_LOGO,
647 IBMVFC_HOST_ACTION_LOGO_WAIT,
630 IBMVFC_HOST_ACTION_INIT, 648 IBMVFC_HOST_ACTION_INIT,
631 IBMVFC_HOST_ACTION_INIT_WAIT, 649 IBMVFC_HOST_ACTION_INIT_WAIT,
632 IBMVFC_HOST_ACTION_QUERY, 650 IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
635 IBMVFC_HOST_ACTION_ALLOC_TGTS, 653 IBMVFC_HOST_ACTION_ALLOC_TGTS,
636 IBMVFC_HOST_ACTION_TGT_INIT, 654 IBMVFC_HOST_ACTION_TGT_INIT,
637 IBMVFC_HOST_ACTION_TGT_DEL_FAILED, 655 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
638 IBMVFC_HOST_ACTION_TGT_ADD,
639}; 656};
640 657
641enum ibmvfc_host_state { 658enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
682 int client_migrated; 699 int client_migrated;
683 int reinit; 700 int reinit;
684 int delay_init; 701 int delay_init;
702 int scan_complete;
703 int logged_in;
685 int events_to_log; 704 int events_to_log;
686#define IBMVFC_AE_LINKUP 0x0001 705#define IBMVFC_AE_LINKUP 0x0001
687#define IBMVFC_AE_LINKDOWN 0x0002 706#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
692 void (*job_step) (struct ibmvfc_host *); 711 void (*job_step) (struct ibmvfc_host *);
693 struct task_struct *work_thread; 712 struct task_struct *work_thread;
694 struct tasklet_struct tasklet; 713 struct tasklet_struct tasklet;
714 struct work_struct rport_add_work_q;
695 wait_queue_head_t init_wait_q; 715 wait_queue_head_t init_wait_q;
696 wait_queue_head_t work_wait_q; 716 wait_queue_head_t work_wait_q;
697}; 717};
@@ -707,6 +727,12 @@ struct ibmvfc_host {
707#define tgt_err(t, fmt, ...) \ 727#define tgt_err(t, fmt, ...) \
708 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 728 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
709 729
730#define tgt_log(t, level, fmt, ...) \
731 do { \
732 if ((t)->vhost->log_level >= level) \
733 tgt_err(t, fmt, ##__VA_ARGS__); \
734 } while (0)
735
710#define ibmvfc_dbg(vhost, ...) \ 736#define ibmvfc_dbg(vhost, ...) \
711 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 737 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
712 738
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e40..11d2602ae88 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
70#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h>
73#include <asm/firmware.h> 74#include <asm/firmware.h>
74#include <asm/vio.h> 75#include <asm/vio.h>
75#include <asm/firmware.h> 76#include <asm/firmware.h>
@@ -87,9 +88,15 @@
87 */ 88 */
88static int max_id = 64; 89static int max_id = 64;
89static int max_channel = 3; 90static int max_channel = 3;
90static int init_timeout = 5; 91static int init_timeout = 300;
92static int login_timeout = 60;
93static int info_timeout = 30;
94static int abort_timeout = 60;
95static int reset_timeout = 60;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 96static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 97static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
98static int fast_fail = 1;
99static int client_reserve = 1;
93 100
94static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
95 102
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 117MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
111module_param_named(max_requests, max_requests, int, S_IRUGO); 118module_param_named(max_requests, max_requests, int, S_IRUGO);
112MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 119MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
120module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
122module_param_named(client_reserve, client_reserve, int, S_IRUGO );
123MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
113 124
114/* ------------------------------------------------------------ 125/* ------------------------------------------------------------
115 * Routines for the event pool and event structs 126 * Routines for the event pool and event structs
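(Editor's side note on the parameter changes above.) init_timeout's default jumps from 5 to 300 seconds and is joined by separate login/info/abort/reset timeouts, while fast_fail and client_reserve become module parameters; fast_fail is also writable at runtime (S_IWUSR) and is re-read the next time the login sequence runs, client_reserve is read-only. For example:

/*
 *   modprobe ibmvscsi fast_fail=0 client_reserve=0
 *   echo 1 > /sys/module/ibmvscsi/parameters/fast_fail
 */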
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
781/* ------------------------------------------------------------ 792/* ------------------------------------------------------------
782 * Routines for driver initialization 793 * Routines for driver initialization
783 */ 794 */
795
784/** 796/**
785 * adapter_info_rsp: - Handle response to MAD adapter info request 797 * map_persist_bufs: - Pre-map persistent data for adapter logins
786 * @evt_struct: srp_event_struct with the response 798 * @hostdata: ibmvscsi_host_data of host
787 * 799 *
788 * Used as a "done" callback by when sending adapter_info. Gets called 800 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
789 * by ibmvscsi_handle_crq() 801 * Return 1 on error, 0 on success.
790*/ 802 */
791static void adapter_info_rsp(struct srp_event_struct *evt_struct) 803static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
792{ 804{
793 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
794 dma_unmap_single(hostdata->dev,
795 evt_struct->iu.mad.adapter_info.buffer,
796 evt_struct->iu.mad.adapter_info.common.length,
797 DMA_BIDIRECTIONAL);
798 805
799 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 806 hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
800 dev_err(hostdata->dev, "error %d getting adapter info\n", 807 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
801 evt_struct->xfer_iu->mad.adapter_info.common.status); 808
802 } else { 809 if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
803 dev_info(hostdata->dev, "host srp version: %s, " 810 dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
804 "host partition %s (%d), OS %d, max io %u\n", 811 return 1;
805 hostdata->madapter_info.srp_version,
806 hostdata->madapter_info.partition_name,
807 hostdata->madapter_info.partition_number,
808 hostdata->madapter_info.os_type,
809 hostdata->madapter_info.port_max_txu[0]);
810
811 if (hostdata->madapter_info.port_max_txu[0])
812 hostdata->host->max_sectors =
813 hostdata->madapter_info.port_max_txu[0] >> 9;
814
815 if (hostdata->madapter_info.os_type == 3 &&
816 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
817 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
818 hostdata->madapter_info.srp_version);
819 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
820 MAX_INDIRECT_BUFS);
821 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
822 }
823 } 812 }
813
814 hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
815 &hostdata->madapter_info,
816 sizeof(hostdata->madapter_info),
817 DMA_BIDIRECTIONAL);
818 if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
819 dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
820 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
821 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
822 return 1;
823 }
824
825 return 0;
824} 826}
825 827
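(Editor's note on the two helpers above.) The old code DMA-mapped the adapter-info buffer on every request and had to handle mapping failures at runtime (see the firmware_has_feature(FW_FEATURE_CMO) check in the removed lines further down); the new code maps the capabilities and adapter-info buffers once at probe time and simply reuses hostdata->caps_addr and hostdata->adapter_info_addr in the MADs. The general shape, with dev, buf and len as placeholders:

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, addr))
		return 1;		/* fail the probe, not a later login */
	/* ... hand addr to every subsequent MAD that needs the buffer ... */
	dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);	/* on teardown */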
826/** 828/**
827 * send_mad_adapter_info: - Sends the mad adapter info request 829 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
828 * and stores the result so it can be retrieved with 830 * @hostdata: ibmvscsi_host_data of host
829 * sysfs. We COULD consider causing a failure if the 831 *
830 * returned SRP version doesn't match ours. 832 * Unmap the capabilities and adapter info DMA buffers
831 * @hostdata: ibmvscsi_host_data of host 833 */
832 * 834static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
833 * Returns zero if successful.
834*/
835static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
836{ 835{
837 struct viosrp_adapter_info *req; 836 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
838 struct srp_event_struct *evt_struct; 837 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
839 unsigned long flags;
840 dma_addr_t addr;
841
842 evt_struct = get_event_struct(&hostdata->pool);
843 if (!evt_struct) {
844 dev_err(hostdata->dev,
845 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
846 return;
847 }
848
849 init_event_struct(evt_struct,
850 adapter_info_rsp,
851 VIOSRP_MAD_FORMAT,
852 init_timeout);
853
854 req = &evt_struct->iu.mad.adapter_info;
855 memset(req, 0x00, sizeof(*req));
856
857 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
858 req->common.length = sizeof(hostdata->madapter_info);
859 req->buffer = addr = dma_map_single(hostdata->dev,
860 &hostdata->madapter_info,
861 sizeof(hostdata->madapter_info),
862 DMA_BIDIRECTIONAL);
863 838
864 if (dma_mapping_error(hostdata->dev, req->buffer)) { 839 dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
865 if (!firmware_has_feature(FW_FEATURE_CMO)) 840 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
866 dev_err(hostdata->dev, 841}
867 "Unable to map request_buffer for "
868 "adapter_info!\n");
869 free_event_struct(&hostdata->pool, evt_struct);
870 return;
871 }
872
873 spin_lock_irqsave(hostdata->host->host_lock, flags);
874 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
875 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
876 dma_unmap_single(hostdata->dev,
877 addr,
878 sizeof(hostdata->madapter_info),
879 DMA_BIDIRECTIONAL);
880 }
881 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
882};
883 842
884/** 843/**
885 * login_rsp: - Handle response to SRP login request 844 * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
909 } 868 }
910 869
911 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); 870 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
912 871 hostdata->client_migrated = 0;
913 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
914 dev_err(hostdata->dev, "Invalid request_limit.\n");
915 872
916 /* Now we know what the real request-limit is. 873 /* Now we know what the real request-limit is.
917 * This value is set rather than added to request_limit because 874 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
922 879
923 /* If we had any pending I/Os, kick them */ 880 /* If we had any pending I/Os, kick them */
924 scsi_unblock_requests(hostdata->host); 881 scsi_unblock_requests(hostdata->host);
925
926 send_mad_adapter_info(hostdata);
927 return;
928} 882}
929 883
930/** 884/**
931 * send_srp_login: - Sends the srp login 885 * send_srp_login: - Sends the srp login
932 * @hostdata: ibmvscsi_host_data of host 886 * @hostdata: ibmvscsi_host_data of host
933 * 887 *
934 * Returns zero if successful. 888 * Returns zero if successful.
935*/ 889*/
936static int send_srp_login(struct ibmvscsi_host_data *hostdata) 890static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
939 unsigned long flags; 893 unsigned long flags;
940 struct srp_login_req *login; 894 struct srp_login_req *login;
941 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 895 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
942 if (!evt_struct) {
943 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
944 return FAILED;
945 }
946 896
947 init_event_struct(evt_struct, 897 BUG_ON(!evt_struct);
948 login_rsp, 898 init_event_struct(evt_struct, login_rsp,
949 VIOSRP_SRP_FORMAT, 899 VIOSRP_SRP_FORMAT, login_timeout);
950 init_timeout);
951 900
952 login = &evt_struct->iu.srp.login_req; 901 login = &evt_struct->iu.srp.login_req;
953 memset(login, 0x00, sizeof(struct srp_login_req)); 902 memset(login, 0, sizeof(*login));
954 login->opcode = SRP_LOGIN_REQ; 903 login->opcode = SRP_LOGIN_REQ;
955 login->req_it_iu_len = sizeof(union srp_iu); 904 login->req_it_iu_len = sizeof(union srp_iu);
956 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 905 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
957 906
958 spin_lock_irqsave(hostdata->host->host_lock, flags); 907 spin_lock_irqsave(hostdata->host->host_lock, flags);
959 /* Start out with a request limit of 0, since this is negotiated in 908 /* Start out with a request limit of 0, since this is negotiated in
960 * the login request we are just sending and login requests always 909 * the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
962 */ 911 */
963 atomic_set(&hostdata->request_limit, 0); 912 atomic_set(&hostdata->request_limit, 0);
964 913
965 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 914 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 915 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 dev_info(hostdata->dev, "sent SRP login\n"); 916 dev_info(hostdata->dev, "sent SRP login\n");
968 return rc; 917 return rc;
969}; 918};
970 919
971/** 920/**
921 * capabilities_rsp: - Handle response to MAD adapter capabilities request
922 * @evt_struct: srp_event_struct with the response
923 *
924 * Used as a "done" callback when sending the capabilities request.
925 */
926static void capabilities_rsp(struct srp_event_struct *evt_struct)
927{
928 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
929
930 if (evt_struct->xfer_iu->mad.capabilities.common.status) {
931 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
932 evt_struct->xfer_iu->mad.capabilities.common.status);
933 } else {
934 if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
935 dev_info(hostdata->dev, "Partition migration not supported\n");
936
937 if (client_reserve) {
938 if (hostdata->caps.reserve.common.server_support ==
939 SERVER_SUPPORTS_CAP)
940 dev_info(hostdata->dev, "Client reserve enabled\n");
941 else
942 dev_info(hostdata->dev, "Client reserve not supported\n");
943 }
944 }
945
946 send_srp_login(hostdata);
947}
948
949/**
950 * send_mad_capabilities: - Sends the MAD capabilities request
951 * so the client and server can negotiate supported capabilities
952 * @hostdata: ibmvscsi_host_data of host
953 */
954static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
955{
956 struct viosrp_capabilities *req;
957 struct srp_event_struct *evt_struct;
958 unsigned long flags;
959 struct device_node *of_node = hostdata->dev->archdata.of_node;
960 const char *location;
961
962 evt_struct = get_event_struct(&hostdata->pool);
963 BUG_ON(!evt_struct);
964
965 init_event_struct(evt_struct, capabilities_rsp,
966 VIOSRP_MAD_FORMAT, info_timeout);
967
968 req = &evt_struct->iu.mad.capabilities;
969 memset(req, 0, sizeof(*req));
970
971 hostdata->caps.flags = CAP_LIST_SUPPORTED;
972 if (hostdata->client_migrated)
973 hostdata->caps.flags |= CLIENT_MIGRATED;
974
975 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
976 sizeof(hostdata->caps.name));
977 hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
978
979 location = of_get_property(of_node, "ibm,loc-code", NULL);
980 location = location ? location : dev_name(hostdata->dev);
981 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
982 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
983
984 req->common.type = VIOSRP_CAPABILITIES_TYPE;
985 req->buffer = hostdata->caps_addr;
986
987 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
988 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
989 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
990 hostdata->caps.migration.ecl = 1;
991
992 if (client_reserve) {
993 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
994 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
995 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
996 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
997 req->common.length = sizeof(hostdata->caps);
998 } else
999 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
1000
1001 spin_lock_irqsave(hostdata->host->host_lock, flags);
1002 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1003 dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1004 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1005};
1006
1007/**
1008 * fast_fail_rsp: - Handle response to MAD enable fast fail
1009 * @evt_struct: srp_event_struct with the response
1010 *
1011 * Used as a "done" callback when sending enable fast fail. Gets called
1012 * by ibmvscsi_handle_crq()
1013 */
1014static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1015{
1016 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1017 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
1018
1019 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1020 dev_err(hostdata->dev, "fast_fail not supported in server\n");
1021 else if (status == VIOSRP_MAD_FAILED)
1022 dev_err(hostdata->dev, "fast_fail request failed\n");
1023 else if (status != VIOSRP_MAD_SUCCESS)
1024 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1025
1026 send_mad_capabilities(hostdata);
1027}
1028
1029/**
1030 * enable_fast_fail - Enable fast fail on the adapter, if requested
1031 * @hostdata: ibmvscsi_host_data of host
1032 *
1033 * Returns zero if successful.
1034 */
1035static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1036{
1037 int rc;
1038 unsigned long flags;
1039 struct viosrp_fast_fail *fast_fail_mad;
1040 struct srp_event_struct *evt_struct;
1041
1042 if (!fast_fail) {
1043 send_mad_capabilities(hostdata);
1044 return 0;
1045 }
1046
1047 evt_struct = get_event_struct(&hostdata->pool);
1048 BUG_ON(!evt_struct);
1049
1050 init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1051
1052 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1053 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1054 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
1055 fast_fail_mad->common.length = sizeof(*fast_fail_mad);
1056
1057 spin_lock_irqsave(hostdata->host->host_lock, flags);
1058 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1059 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1060 return rc;
1061}
1062
1063/**
1064 * adapter_info_rsp: - Handle response to MAD adapter info request
1065 * @evt_struct: srp_event_struct with the response
1066 *
1067 * Used as a "done" callback when sending adapter_info. Gets called
1068 * by ibmvscsi_handle_crq()
1069*/
1070static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1071{
1072 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1073
1074 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1075 dev_err(hostdata->dev, "error %d getting adapter info\n",
1076 evt_struct->xfer_iu->mad.adapter_info.common.status);
1077 } else {
1078 dev_info(hostdata->dev, "host srp version: %s, "
1079 "host partition %s (%d), OS %d, max io %u\n",
1080 hostdata->madapter_info.srp_version,
1081 hostdata->madapter_info.partition_name,
1082 hostdata->madapter_info.partition_number,
1083 hostdata->madapter_info.os_type,
1084 hostdata->madapter_info.port_max_txu[0]);
1085
1086 if (hostdata->madapter_info.port_max_txu[0])
1087 hostdata->host->max_sectors =
1088 hostdata->madapter_info.port_max_txu[0] >> 9;
1089
1090 if (hostdata->madapter_info.os_type == 3 &&
1091 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1092 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1093 hostdata->madapter_info.srp_version);
1094 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 }
1098 }
1099
1100 enable_fast_fail(hostdata);
1101}
1102
1103/**
1104 * send_mad_adapter_info: - Sends the mad adapter info request
1105 * and stores the result so it can be retrieved with
1106 * sysfs. We COULD consider causing a failure if the
1107 * returned SRP version doesn't match ours.
1108 * @hostdata: ibmvscsi_host_data of host
1109 *
1110 * The response is handled asynchronously by adapter_info_rsp().
1111*/
1112static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1113{
1114 struct viosrp_adapter_info *req;
1115 struct srp_event_struct *evt_struct;
1116 unsigned long flags;
1117
1118 evt_struct = get_event_struct(&hostdata->pool);
1119 BUG_ON(!evt_struct);
1120
1121 init_event_struct(evt_struct,
1122 adapter_info_rsp,
1123 VIOSRP_MAD_FORMAT,
1124 info_timeout);
1125
1126 req = &evt_struct->iu.mad.adapter_info;
1127 memset(req, 0x00, sizeof(*req));
1128
1129 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
1130 req->common.length = sizeof(hostdata->madapter_info);
1131 req->buffer = hostdata->adapter_info_addr;
1132
1133 spin_lock_irqsave(hostdata->host->host_lock, flags);
1134 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1135 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1136 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1137}
1138
1139/**
1140 * init_adapter: Start virtual adapter initialization sequence
1141 * @hostdata: ibmvscsi_host_data of host
1142 */
1143static void init_adapter(struct ibmvscsi_host_data *hostdata)
1144{
1145 send_mad_adapter_info(hostdata);
1146}
1147
1148/**
972 * sync_completion: Signal that a synchronous command has completed 1149 * sync_completion: Signal that a synchronous command has completed
973 * Note that after returning from this call, the evt_struct is freed. 1150 * Note that after returning from this call, the evt_struct is freed.
974 * the caller waiting on this completion shouldn't touch the evt_struct 1151 * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1029 init_event_struct(evt, 1206 init_event_struct(evt,
1030 sync_completion, 1207 sync_completion,
1031 VIOSRP_SRP_FORMAT, 1208 VIOSRP_SRP_FORMAT,
1032 init_timeout); 1209 abort_timeout);
1033 1210
1034 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1211 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1035 1212
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1043 evt->sync_srp = &srp_rsp; 1220 evt->sync_srp = &srp_rsp;
1044 1221
1045 init_completion(&evt->comp); 1222 init_completion(&evt->comp);
1046 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1223 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1047 1224
1048 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1225 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1049 break; 1226 break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1152 init_event_struct(evt, 1329 init_event_struct(evt,
1153 sync_completion, 1330 sync_completion,
1154 VIOSRP_SRP_FORMAT, 1331 VIOSRP_SRP_FORMAT,
1155 init_timeout); 1332 reset_timeout);
1156 1333
1157 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1334 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1158 1335
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1165 evt->sync_srp = &srp_rsp; 1342 evt->sync_srp = &srp_rsp;
1166 1343
1167 init_completion(&evt->comp); 1344 init_completion(&evt->comp);
1168 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1345 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1169 1346
1170 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1347 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1171 break; 1348 break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1281 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1458 if ((rc = ibmvscsi_ops->send_crq(hostdata,
1282 0xC002000000000000LL, 0)) == 0) { 1459 0xC002000000000000LL, 0)) == 0) {
1283 /* Now login */ 1460 /* Now login */
1284 send_srp_login(hostdata); 1461 init_adapter(hostdata);
1285 } else { 1462 } else {
1286 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); 1463 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1287 } 1464 }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1291 dev_info(hostdata->dev, "partner initialization complete\n"); 1468 dev_info(hostdata->dev, "partner initialization complete\n");
1292 1469
1293 /* Now login */ 1470 /* Now login */
1294 send_srp_login(hostdata); 1471 init_adapter(hostdata);
1295 break; 1472 break;
1296 default: 1473 default:
1297 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); 1474 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1303 if (crq->format == 0x06) { 1480 if (crq->format == 0x06) {
1304 /* We need to re-setup the interpartition connection */ 1481 /* We need to re-setup the interpartition connection */
1305 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1482 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1483 hostdata->client_migrated = 1;
1306 purge_requests(hostdata, DID_REQUEUE); 1484 purge_requests(hostdata, DID_REQUEUE);
1307 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1485 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
1308 hostdata)) || 1486 hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1397 init_event_struct(evt_struct, 1575 init_event_struct(evt_struct,
1398 sync_completion, 1576 sync_completion,
1399 VIOSRP_MAD_FORMAT, 1577 VIOSRP_MAD_FORMAT,
1400 init_timeout); 1578 info_timeout);
1401 1579
1402 host_config = &evt_struct->iu.mad.host_config; 1580 host_config = &evt_struct->iu.mad.host_config;
1403 1581
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1419 1597
1420 init_completion(&evt_struct->comp); 1598 init_completion(&evt_struct->comp);
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1599 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 1600 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1423 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1601 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1424 if (rc == 0) 1602 if (rc == 0)
1425 wait_for_completion(&evt_struct->comp); 1603 wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1444 spin_lock_irqsave(shost->host_lock, lock_flags); 1622 spin_lock_irqsave(shost->host_lock, lock_flags);
1445 if (sdev->type == TYPE_DISK) { 1623 if (sdev->type == TYPE_DISK) {
1446 sdev->allow_restart = 1; 1624 sdev->allow_restart = 1;
1447 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 1625 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1448 } 1626 }
1449 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1627 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1450 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1628 spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1471/* ------------------------------------------------------------ 1649/* ------------------------------------------------------------
1472 * sysfs attributes 1650 * sysfs attributes
1473 */ 1651 */
1652static ssize_t show_host_vhost_loc(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct Scsi_Host *shost = class_to_shost(dev);
1656 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1657 int len;
1658
1659 len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1660 hostdata->caps.loc);
1661 return len;
1662}
1663
1664static struct device_attribute ibmvscsi_host_vhost_loc = {
1665 .attr = {
1666 .name = "vhost_loc",
1667 .mode = S_IRUGO,
1668 },
1669 .show = show_host_vhost_loc,
1670};
1671
1672static ssize_t show_host_vhost_name(struct device *dev,
1673 struct device_attribute *attr, char *buf)
1674{
1675 struct Scsi_Host *shost = class_to_shost(dev);
1676 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1677 int len;
1678
1679 len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1680 hostdata->caps.name);
1681 return len;
1682}
1683
1684static struct device_attribute ibmvscsi_host_vhost_name = {
1685 .attr = {
1686 .name = "vhost_name",
1687 .mode = S_IRUGO,
1688 },
1689 .show = show_host_vhost_name,
1690};
1691
1474static ssize_t show_host_srp_version(struct device *dev, 1692static ssize_t show_host_srp_version(struct device *dev,
1475 struct device_attribute *attr, char *buf) 1693 struct device_attribute *attr, char *buf)
1476{ 1694{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
1594}; 1812};
1595 1813
1596static struct device_attribute *ibmvscsi_attrs[] = { 1814static struct device_attribute *ibmvscsi_attrs[] = {
1815 &ibmvscsi_host_vhost_loc,
1816 &ibmvscsi_host_vhost_name,
1597 &ibmvscsi_host_srp_version, 1817 &ibmvscsi_host_srp_version,
1598 &ibmvscsi_host_partition_name, 1818 &ibmvscsi_host_partition_name,
1599 &ibmvscsi_host_partition_number, 1819 &ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1674 atomic_set(&hostdata->request_limit, -1); 1894 atomic_set(&hostdata->request_limit, -1);
1675 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1895 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1676 1896
1897 if (map_persist_bufs(hostdata)) {
1898 dev_err(&vdev->dev, "couldn't map persistent buffers\n");
1899 goto persist_bufs_failed;
1900 }
1901
1677 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1902 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1678 if (rc != 0 && rc != H_RESOURCE) { 1903 if (rc != 0 && rc != H_RESOURCE) {
1679 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1904 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1687 host->max_lun = 8; 1912 host->max_lun = 8;
1688 host->max_id = max_id; 1913 host->max_id = max_id;
1689 host->max_channel = max_channel; 1914 host->max_channel = max_channel;
1915 host->max_cmd_len = 16;
1690 1916
1691 if (scsi_add_host(hostdata->host, hostdata->dev)) 1917 if (scsi_add_host(hostdata->host, hostdata->dev))
1692 goto add_host_failed; 1918 goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1733 init_pool_failed: 1959 init_pool_failed:
1734 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1735 init_crq_failed: 1961 init_crq_failed:
1962 unmap_persist_bufs(hostdata);
1963 persist_bufs_failed:
1736 scsi_host_put(host); 1964 scsi_host_put(host);
1737 scsi_host_alloc_failed: 1965 scsi_host_alloc_failed:
1738 return -1; 1966 return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1741static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1742{ 1970{
1743 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1972 unmap_persist_bufs(hostdata);
1744 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1745 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1746 max_events); 1975 max_events);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16..76425303def 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90/* all driver data associated with a host adapter */ 90/* all driver data associated with a host adapter */
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated;
93 struct device *dev; 94 struct device *dev;
94 struct event_pool pool; 95 struct event_pool pool;
95 struct crq_queue queue; 96 struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
97 struct list_head sent; 98 struct list_head sent;
98 struct Scsi_Host *host; 99 struct Scsi_Host *host;
99 struct mad_adapter_info_data madapter_info; 100 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps;
102 dma_addr_t caps_addr;
103 dma_addr_t adapter_info_addr;
100}; 104};
101 105
102/* routines for managing a command/response queue */ 106/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad..2cd735d1d19 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
37 37
38#define SRP_VERSION "16.a" 38#define SRP_VERSION "16.a"
39#define SRP_MAX_IU_LEN 256 39#define SRP_MAX_IU_LEN 256
40#define SRP_MAX_LOC_LEN 32
40 41
41union srp_iu { 42union srp_iu {
42 struct srp_login_req login_req; 43 struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
86 VIOSRP_EMPTY_IU_TYPE = 0x01, 87 VIOSRP_EMPTY_IU_TYPE = 0x01,
87 VIOSRP_ERROR_LOG_TYPE = 0x02, 88 VIOSRP_ERROR_LOG_TYPE = 0x02,
88 VIOSRP_ADAPTER_INFO_TYPE = 0x03, 89 VIOSRP_ADAPTER_INFO_TYPE = 0x03,
89 VIOSRP_HOST_CONFIG_TYPE = 0x04 90 VIOSRP_HOST_CONFIG_TYPE = 0x04,
91 VIOSRP_CAPABILITIES_TYPE = 0x05,
92 VIOSRP_ENABLE_FAST_FAIL = 0x08,
93};
94
95enum viosrp_mad_status {
96 VIOSRP_MAD_SUCCESS = 0x00,
97 VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
98 VIOSRP_MAD_FAILED = 0xF7,
99};
100
101enum viosrp_capability_type {
102 MIGRATION_CAPABILITIES = 0x01,
103 RESERVATION_CAPABILITIES = 0x02,
104};
105
106enum viosrp_capability_support {
107 SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
108 SERVER_SUPPORTS_CAP = 0x01,
109 SERVER_CAP_DATA = 0x02,
110};
111
112enum viosrp_reserve_type {
113 CLIENT_RESERVE_SCSI_2 = 0x01,
114};
115
116enum viosrp_capability_flag {
117 CLIENT_MIGRATED = 0x01,
118 CLIENT_RECONNECT = 0x02,
119 CAP_LIST_SUPPORTED = 0x04,
120 CAP_LIST_DATA = 0x08,
90}; 121};
91 122
92/* 123/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
127 u64 buffer; 158 u64 buffer;
128}; 159};
129 160
161struct viosrp_fast_fail {
162 struct mad_common common;
163};
164
165struct viosrp_capabilities {
166 struct mad_common common;
167 u64 buffer;
168};
169
170struct mad_capability_common {
171 u32 cap_type;
172 u16 length;
173 u16 server_support;
174};
175
176struct mad_reserve_cap {
177 struct mad_capability_common common;
178 u32 type;
179};
180
181struct mad_migration_cap {
182 struct mad_capability_common common;
183 u32 ecl;
184};
185
186struct capabilities {
187 u32 flags;
188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration;
191 struct mad_reserve_cap reserve;
192};
193
130union mad_iu { 194union mad_iu {
131 struct viosrp_empty_iu empty_iu; 195 struct viosrp_empty_iu empty_iu;
132 struct viosrp_error_log error_log; 196 struct viosrp_error_log error_log;
133 struct viosrp_adapter_info adapter_info; 197 struct viosrp_adapter_info adapter_info;
134 struct viosrp_host_config host_config; 198 struct viosrp_host_config host_config;
199 struct viosrp_fast_fail fast_fail;
200 struct viosrp_capabilities capabilities;
135}; 201};
136 202
137union viosrp_iu { 203union viosrp_iu {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded860..0f8bc772b11 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
7003 ioa_cfg->sdt_state = ABORT_DUMP; 7003 ioa_cfg->sdt_state = ABORT_DUMP;
7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7005 ioa_cfg->in_ioa_bringdown = 1; 7005 ioa_cfg->in_ioa_bringdown = 1;
7006 ioa_cfg->allow_cmds = 0;
7006 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7007 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7008} 7009}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
7688 * Return value: 7689 * Return value:
7689 * none 7690 * none
7690 **/ 7691 **/
7691static void ipr_remove(struct pci_dev *pdev) 7692static void __devexit ipr_remove(struct pci_dev *pdev)
7692{ 7693{
7693 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7694 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7694 7695
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
7864 .name = IPR_NAME, 7865 .name = IPR_NAME,
7865 .id_table = ipr_pci_table, 7866 .id_table = ipr_pci_table,
7866 .probe = ipr_probe, 7867 .probe = ipr_probe,
7867 .remove = ipr_remove, 7868 .remove = __devexit_p(ipr_remove),
7868 .shutdown = ipr_shutdown, 7869 .shutdown = ipr_shutdown,
7869 .err_handler = &ipr_err_handler, 7870 .err_handler = &ipr_err_handler,
7870}; 7871};
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf..7af9bceb8aa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1159 atomic_inc(&mp->stats.xid_not_found); 1159 atomic_inc(&mp->stats.xid_not_found);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 if (ep->esb_stat & ESB_ST_COMPLETE) {
1163 atomic_inc(&mp->stats.xid_not_found);
1164 goto out;
1165 }
1162 if (ep->rxid == FC_XID_UNKNOWN) 1166 if (ep->rxid == FC_XID_UNKNOWN)
1163 ep->rxid = ntohs(fh->fh_rx_id); 1167 ep->rxid = ntohs(fh->fh_rx_id);
1164 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { 1168 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b1..ad8b747837b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1897 break; 1897 break;
1898 case FC_CMD_ABORTED: 1898 case FC_CMD_ABORTED:
1899 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1899 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1900 break; 1900 break;
1901 case FC_CMD_TIME_OUT: 1901 case FC_CMD_TIME_OUT:
1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8a..7bfbff7e0ef 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
478 if (PTR_ERR(fp) == -FC_EX_CLOSED) 478 if (PTR_ERR(fp) == -FC_EX_CLOSED)
479 return fc_rport_error(rport, fp); 479 return fc_rport_error(rport, fp);
480 480
481 if (rdata->retries < rdata->local_port->max_retry_count) { 481 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", 482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
483 PTR_ERR(fp), fc_rport_state(rport)); 483 PTR_ERR(fp), fc_rport_state(rport));
484 rdata->retries++; 484 rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
1330} 1330}
1331EXPORT_SYMBOL(fc_rport_init); 1331EXPORT_SYMBOL(fc_rport_init);
1332 1332
1333int fc_setup_rport() 1333int fc_setup_rport(void)
1334{ 1334{
1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1336 if (!rport_event_queue) 1336 if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
1339} 1339}
1340EXPORT_SYMBOL(fc_setup_rport); 1340EXPORT_SYMBOL(fc_setup_rport);
1341 1341
1342void fc_destroy_rport() 1342void fc_destroy_rport(void)
1343{ 1343{
1344 destroy_workqueue(rport_event_queue); 1344 destroy_workqueue(rport_event_queue);
1345} 1345}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d3..59908aead53 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
81 struct Scsi_Host *shost = conn->session->host; 81 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost); 82 struct iscsi_host *ihost = shost_priv(shost);
83 83
84 queue_work(ihost->workq, &conn->xmitwork); 84 if (ihost->workq)
85 queue_work(ihost->workq, &conn->xmitwork);
85} 86}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 87EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87 88
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
109 * if the window closed with IO queued, then kick the 110 * if the window closed with IO queued, then kick the
110 * xmit thread 111 * xmit thread
111 */ 112 */
112 if (!list_empty(&session->leadconn->xmitqueue) || 113 if (!list_empty(&session->leadconn->cmdqueue) ||
113 !list_empty(&session->leadconn->mgmtqueue)) { 114 !list_empty(&session->leadconn->mgmtqueue))
114 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 115 iscsi_conn_queue_work(session->leadconn);
115 iscsi_conn_queue_work(session->leadconn);
116 }
117 } 116 }
118} 117}
119EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 118EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
257 itt_t itt; 256 itt_t itt;
258 int rc; 257 int rc;
259 258
260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); 259 if (conn->session->tt->alloc_pdu) {
261 if (rc) 260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
262 return rc; 261 if (rc)
262 return rc;
263 }
263 hdr = (struct iscsi_cmd *) task->hdr; 264 hdr = (struct iscsi_cmd *) task->hdr;
264 itt = hdr->itt; 265 itt = hdr->itt;
265 memset(hdr, 0, sizeof(*hdr)); 266 memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
364 return -EIO; 365 return -EIO;
365 366
366 task->state = ISCSI_TASK_RUNNING; 367 task->state = ISCSI_TASK_RUNNING;
367 list_move_tail(&task->running, &conn->run_list);
368 368
369 conn->scsicmd_pdus_cnt++; 369 conn->scsicmd_pdus_cnt++;
370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
380} 380}
381 381
382/** 382/**
383 * iscsi_complete_command - finish a task 383 * iscsi_free_task - free a task
384 * @task: iscsi cmd task 384 * @task: iscsi cmd task
385 * 385 *
386 * Must be called with session lock. 386 * Must be called with session lock.
387 * This function returns the scsi command to scsi-ml or cleans 387 * This function returns the scsi command to scsi-ml or cleans
388 * up mgmt tasks then returns the task to the pool. 388 * up mgmt tasks then returns the task to the pool.
389 */ 389 */
390static void iscsi_complete_command(struct iscsi_task *task) 390static void iscsi_free_task(struct iscsi_task *task)
391{ 391{
392 struct iscsi_conn *conn = task->conn; 392 struct iscsi_conn *conn = task->conn;
393 struct iscsi_session *session = conn->session; 393 struct iscsi_session *session = conn->session;
394 struct scsi_cmnd *sc = task->sc; 394 struct scsi_cmnd *sc = task->sc;
395 395
396 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
397 task->itt, task->state, task->sc);
398
396 session->tt->cleanup_task(task); 399 session->tt->cleanup_task(task);
397 list_del_init(&task->running); 400 task->state = ISCSI_TASK_FREE;
398 task->state = ISCSI_TASK_COMPLETED;
399 task->sc = NULL; 401 task->sc = NULL;
400
401 if (conn->task == task)
402 conn->task = NULL;
403 /* 402 /*
404 * login task is preallocated so do not free 403 * login task is preallocated so do not free
405 */ 404 */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
408 407
409 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 408 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
410 409
411 if (conn->ping_task == task)
412 conn->ping_task = NULL;
413
414 if (sc) { 410 if (sc) {
415 task->sc = NULL; 411 task->sc = NULL;
416 /* SCSI eh reuses commands to verify us */ 412 /* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
433static void __iscsi_put_task(struct iscsi_task *task) 429static void __iscsi_put_task(struct iscsi_task *task)
434{ 430{
435 if (atomic_dec_and_test(&task->refcount)) 431 if (atomic_dec_and_test(&task->refcount))
436 iscsi_complete_command(task); 432 iscsi_free_task(task);
437} 433}
438 434
439void iscsi_put_task(struct iscsi_task *task) 435void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
446} 442}
447EXPORT_SYMBOL_GPL(iscsi_put_task); 443EXPORT_SYMBOL_GPL(iscsi_put_task);
448 444
445/**
446 * iscsi_complete_task - finish a task
447 * @task: iscsi cmd task
448 * @state: state to complete task with
449 *
450 * Must be called with session lock.
451 */
452static void iscsi_complete_task(struct iscsi_task *task, int state)
453{
454 struct iscsi_conn *conn = task->conn;
455
456 ISCSI_DBG_SESSION(conn->session,
457 "complete task itt 0x%x state %d sc %p\n",
458 task->itt, task->state, task->sc);
459 if (task->state == ISCSI_TASK_COMPLETED ||
460 task->state == ISCSI_TASK_ABRT_TMF ||
461 task->state == ISCSI_TASK_ABRT_SESS_RECOV)
462 return;
463 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
464 task->state = state;
465
466 if (!list_empty(&task->running))
467 list_del_init(&task->running);
468
469 if (conn->task == task)
470 conn->task = NULL;
471
472 if (conn->ping_task == task)
473 conn->ping_task = NULL;
474
475 /* release get from queueing */
476 __iscsi_put_task(task);
477}
478
449/* 479/*
450 * session lock must be held 480 * session lock must be held and if not called for a task that is
481 * still pending or from the xmit thread, then xmit thread must
482 * be suspended.
451 */ 483 */
452static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, 484static void fail_scsi_task(struct iscsi_task *task, int err)
453 int err)
454{ 485{
486 struct iscsi_conn *conn = task->conn;
455 struct scsi_cmnd *sc; 487 struct scsi_cmnd *sc;
488 int state;
456 489
490 /*
491 * if a command completes and we get a successful tmf response
492 * we will hit this because the scsi eh abort code does not take
493 * a ref to the task.
494 */
457 sc = task->sc; 495 sc = task->sc;
458 if (!sc) 496 if (!sc)
459 return; 497 return;
460 498
461 if (task->state == ISCSI_TASK_PENDING) 499 if (task->state == ISCSI_TASK_PENDING) {
462 /* 500 /*
463 * cmd never made it to the xmit thread, so we should not count 501 * cmd never made it to the xmit thread, so we should not count
464 * the cmd in the sequencing 502 * the cmd in the sequencing
465 */ 503 */
466 conn->session->queued_cmdsn--; 504 conn->session->queued_cmdsn--;
505 /* it was never sent so just complete like normal */
506 state = ISCSI_TASK_COMPLETED;
507 } else if (err == DID_TRANSPORT_DISRUPTED)
508 state = ISCSI_TASK_ABRT_SESS_RECOV;
509 else
510 state = ISCSI_TASK_ABRT_TMF;
467 511
468 sc->result = err; 512 sc->result = err << 16;
469 if (!scsi_bidi_cmnd(sc)) 513 if (!scsi_bidi_cmnd(sc))
470 scsi_set_resid(sc, scsi_bufflen(sc)); 514 scsi_set_resid(sc, scsi_bufflen(sc));
471 else { 515 else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
473 scsi_in(sc)->resid = scsi_in(sc)->length; 517 scsi_in(sc)->resid = scsi_in(sc)->length;
474 } 518 }
475 519
476 if (conn->task == task) 520 iscsi_complete_task(task, state);
477 conn->task = NULL;
478 /* release ref from queuecommand */
479 __iscsi_put_task(task);
480} 521}
481 522
482static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 523static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
516 session->state = ISCSI_STATE_LOGGING_OUT; 557 session->state = ISCSI_STATE_LOGGING_OUT;
517 558
518 task->state = ISCSI_TASK_RUNNING; 559 task->state = ISCSI_TASK_RUNNING;
519 list_move_tail(&task->running, &conn->mgmt_run_list);
520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " 560 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, 561 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
522 hdr->itt, task->data_count); 562 hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
528 char *data, uint32_t data_size) 568 char *data, uint32_t data_size)
529{ 569{
530 struct iscsi_session *session = conn->session; 570 struct iscsi_session *session = conn->session;
571 struct iscsi_host *ihost = shost_priv(session->host);
531 struct iscsi_task *task; 572 struct iscsi_task *task;
532 itt_t itt; 573 itt_t itt;
533 574
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
544 */ 585 */
545 task = conn->login_task; 586 task = conn->login_task;
546 else { 587 else {
588 if (session->state != ISCSI_STATE_LOGGED_IN)
589 return NULL;
590
547 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 591 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
548 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 592 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
549 593
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
559 atomic_set(&task->refcount, 1); 603 atomic_set(&task->refcount, 1);
560 task->conn = conn; 604 task->conn = conn;
561 task->sc = NULL; 605 task->sc = NULL;
606 INIT_LIST_HEAD(&task->running);
607 task->state = ISCSI_TASK_PENDING;
562 608
563 if (data_size) { 609 if (data_size) {
564 memcpy(task->data, data, data_size); 610 memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
566 } else 612 } else
567 task->data_count = 0; 613 task->data_count = 0;
568 614
569 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { 615 if (conn->session->tt->alloc_pdu) {
570 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " 616 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
571 "pdu for mgmt task.\n"); 617 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
572 goto requeue_task; 618 "pdu for mgmt task.\n");
619 goto free_task;
620 }
573 } 621 }
622
574 itt = task->hdr->itt; 623 itt = task->hdr->itt;
575 task->hdr_len = sizeof(struct iscsi_hdr); 624 task->hdr_len = sizeof(struct iscsi_hdr);
576 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 625 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
583 task->conn->session->age); 632 task->conn->session->age);
584 } 633 }
585 634
586 INIT_LIST_HEAD(&task->running); 635 if (!ihost->workq) {
587 list_add_tail(&task->running, &conn->mgmtqueue);
588
589 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
590 if (iscsi_prep_mgmt_task(conn, task)) 636 if (iscsi_prep_mgmt_task(conn, task))
591 goto free_task; 637 goto free_task;
592 638
593 if (session->tt->xmit_task(task)) 639 if (session->tt->xmit_task(task))
594 goto free_task; 640 goto free_task;
595 641 } else {
596 } else 642 list_add_tail(&task->running, &conn->mgmtqueue);
597 iscsi_conn_queue_work(conn); 643 iscsi_conn_queue_work(conn);
644 }
598 645
599 return task; 646 return task;
600 647
601free_task: 648free_task:
602 __iscsi_put_task(task); 649 __iscsi_put_task(task);
603 return NULL; 650 return NULL;
604
605requeue_task:
606 if (task != conn->login_task)
607 __kfifo_put(session->cmdpool.queue, (void*)&task,
608 sizeof(void*));
609 return NULL;
610} 651}
611 652
612int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 653int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 742 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
702 } 743 }
703out: 744out:
704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", 745 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
705 sc, sc->result, task->itt); 746 sc, sc->result, task->itt);
706 conn->scsirsp_pdus_cnt++; 747 conn->scsirsp_pdus_cnt++;
707 748 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
708 __iscsi_put_task(task);
709} 749}
710 750
711/** 751/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
724 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 764 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
725 return; 765 return;
726 766
767 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
727 sc->result = (DID_OK << 16) | rhdr->cmd_status; 768 sc->result = (DID_OK << 16) | rhdr->cmd_status;
728 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 769 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
729 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 770 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
738 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 779 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
739 } 780 }
740 781
782 ISCSI_DBG_SESSION(conn->session, "data in with status done "
783 "[sc %p res %d itt 0x%x]\n",
784 sc, sc->result, task->itt);
741 conn->scsirsp_pdus_cnt++; 785 conn->scsirsp_pdus_cnt++;
742 __iscsi_put_task(task); 786 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
743} 787}
744 788
745static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 789static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
823 * 867 *
824 * The session lock must be held. 868 * The session lock must be held.
825 */ 869 */
826static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 870struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
827{ 871{
828 struct iscsi_session *session = conn->session; 872 struct iscsi_session *session = conn->session;
829 int i; 873 int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
840 884
841 return session->cmds[i]; 885 return session->cmds[i];
842} 886}
887EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
843 888
844/** 889/**
845 * __iscsi_complete_pdu - complete pdu 890 * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
959 } 1004 }
960 1005
961 iscsi_tmf_rsp(conn, hdr); 1006 iscsi_tmf_rsp(conn, hdr);
962 __iscsi_put_task(task); 1007 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
963 break; 1008 break;
964 case ISCSI_OP_NOOP_IN: 1009 case ISCSI_OP_NOOP_IN:
965 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 1010 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
977 goto recv_pdu; 1022 goto recv_pdu;
978 1023
979 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 1024 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
980 __iscsi_put_task(task); 1025 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
981 break; 1026 break;
982 default: 1027 default:
983 rc = ISCSI_ERR_BAD_OPCODE; 1028 rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
989recv_pdu: 1034recv_pdu:
990 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 1035 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
991 rc = ISCSI_ERR_CONN_FAILED; 1036 rc = ISCSI_ERR_CONN_FAILED;
992 __iscsi_put_task(task); 1037 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
993 return rc; 1038 return rc;
994} 1039}
995EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 1040EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
1166{ 1211{
1167 struct iscsi_conn *conn = task->conn; 1212 struct iscsi_conn *conn = task->conn;
1168 1213
1169 list_move_tail(&task->running, &conn->requeue); 1214 /*
1215 * this may be on the requeue list already if the xmit_task callout
1216 * is handling the r2ts while we are adding new ones
1217 */
1218 if (list_empty(&task->running))
1219 list_add_tail(&task->running, &conn->requeue);
1170 iscsi_conn_queue_work(conn); 1220 iscsi_conn_queue_work(conn);
1171} 1221}
1172EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1222EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
1206 while (!list_empty(&conn->mgmtqueue)) { 1256 while (!list_empty(&conn->mgmtqueue)) {
1207 conn->task = list_entry(conn->mgmtqueue.next, 1257 conn->task = list_entry(conn->mgmtqueue.next,
1208 struct iscsi_task, running); 1258 struct iscsi_task, running);
1259 list_del_init(&conn->task->running);
1209 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1260 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1210 __iscsi_put_task(conn->task); 1261 __iscsi_put_task(conn->task);
1211 conn->task = NULL; 1262 conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
1217 } 1268 }
1218 1269
1219 /* process pending command queue */ 1270 /* process pending command queue */
1220 while (!list_empty(&conn->xmitqueue)) { 1271 while (!list_empty(&conn->cmdqueue)) {
1221 if (conn->tmf_state == TMF_QUEUED) 1272 if (conn->tmf_state == TMF_QUEUED)
1222 break; 1273 break;
1223 1274
1224 conn->task = list_entry(conn->xmitqueue.next, 1275 conn->task = list_entry(conn->cmdqueue.next,
1225 struct iscsi_task, running); 1276 struct iscsi_task, running);
1277 list_del_init(&conn->task->running);
1226 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1278 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1227 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1279 fail_scsi_task(conn->task, DID_IMM_RETRY);
1228 continue; 1280 continue;
1229 } 1281 }
1230 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1282 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1231 if (rc) { 1283 if (rc) {
1232 if (rc == -ENOMEM) { 1284 if (rc == -ENOMEM) {
1285 list_add_tail(&conn->task->running,
1286 &conn->cmdqueue);
1233 conn->task = NULL; 1287 conn->task = NULL;
1234 goto again; 1288 goto again;
1235 } else 1289 } else
1236 fail_command(conn, conn->task, DID_ABORT << 16); 1290 fail_scsi_task(conn->task, DID_ABORT);
1237 continue; 1291 continue;
1238 } 1292 }
1239 rc = iscsi_xmit_task(conn); 1293 rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
1260 1314
1261 conn->task = list_entry(conn->requeue.next, 1315 conn->task = list_entry(conn->requeue.next,
1262 struct iscsi_task, running); 1316 struct iscsi_task, running);
1317 list_del_init(&conn->task->running);
1263 conn->task->state = ISCSI_TASK_RUNNING; 1318 conn->task->state = ISCSI_TASK_RUNNING;
1264 list_move_tail(conn->requeue.next, &conn->run_list);
1265 rc = iscsi_xmit_task(conn); 1319 rc = iscsi_xmit_task(conn);
1266 if (rc) 1320 if (rc)
1267 goto again; 1321 goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1328{ 1382{
1329 struct iscsi_cls_session *cls_session; 1383 struct iscsi_cls_session *cls_session;
1330 struct Scsi_Host *host; 1384 struct Scsi_Host *host;
1385 struct iscsi_host *ihost;
1331 int reason = 0; 1386 int reason = 0;
1332 struct iscsi_session *session; 1387 struct iscsi_session *session;
1333 struct iscsi_conn *conn; 1388 struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1338 sc->SCp.ptr = NULL; 1393 sc->SCp.ptr = NULL;
1339 1394
1340 host = sc->device->host; 1395 host = sc->device->host;
1396 ihost = shost_priv(host);
1341 spin_unlock(host->host_lock); 1397 spin_unlock(host->host_lock);
1342 1398
1343 cls_session = starget_to_session(scsi_target(sc->device)); 1399 cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1350 goto fault; 1406 goto fault;
1351 } 1407 }
1352 1408
1353 /* 1409 if (session->state != ISCSI_STATE_LOGGED_IN) {
1354 * ISCSI_STATE_FAILED is a temp. state. The recovery
1355 * code will decide what is best to do with command queued
1356 * during this time
1357 */
1358 if (session->state != ISCSI_STATE_LOGGED_IN &&
1359 session->state != ISCSI_STATE_FAILED) {
1360 /* 1410 /*
1361 * to handle the race between when we set the recovery state 1411 * to handle the race between when we set the recovery state
1362 * and block the session we requeue here (commands could 1412 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1364 * up because the block code is not locked) 1414 * up because the block code is not locked)
1365 */ 1415 */
1366 switch (session->state) { 1416 switch (session->state) {
1417 case ISCSI_STATE_FAILED:
1367 case ISCSI_STATE_IN_RECOVERY: 1418 case ISCSI_STATE_IN_RECOVERY:
1368 reason = FAILURE_SESSION_IN_RECOVERY; 1419 reason = FAILURE_SESSION_IN_RECOVERY;
1369 goto reject; 1420 sc->result = DID_IMM_RETRY << 16;
1421 break;
1370 case ISCSI_STATE_LOGGING_OUT: 1422 case ISCSI_STATE_LOGGING_OUT:
1371 reason = FAILURE_SESSION_LOGGING_OUT; 1423 reason = FAILURE_SESSION_LOGGING_OUT;
1372 goto reject; 1424 sc->result = DID_IMM_RETRY << 16;
1425 break;
1373 case ISCSI_STATE_RECOVERY_FAILED: 1426 case ISCSI_STATE_RECOVERY_FAILED:
1374 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1427 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1375 sc->result = DID_TRANSPORT_FAILFAST << 16; 1428 sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1402 reason = FAILURE_OOM; 1455 reason = FAILURE_OOM;
1403 goto reject; 1456 goto reject;
1404 } 1457 }
1405 list_add_tail(&task->running, &conn->xmitqueue);
1406 1458
1407 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1459 if (!ihost->workq) {
1408 reason = iscsi_prep_scsi_cmd_pdu(task); 1460 reason = iscsi_prep_scsi_cmd_pdu(task);
1409 if (reason) { 1461 if (reason) {
1410 if (reason == -ENOMEM) { 1462 if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1419 reason = FAILURE_SESSION_NOT_READY; 1471 reason = FAILURE_SESSION_NOT_READY;
1420 goto prepd_reject; 1472 goto prepd_reject;
1421 } 1473 }
1422 } else 1474 } else {
1475 list_add_tail(&task->running, &conn->cmdqueue);
1423 iscsi_conn_queue_work(conn); 1476 iscsi_conn_queue_work(conn);
1477 }
1424 1478
1425 session->queued_cmdsn++; 1479 session->queued_cmdsn++;
1426 spin_unlock(&session->lock); 1480 spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1429 1483
1430prepd_reject: 1484prepd_reject:
1431 sc->scsi_done = NULL; 1485 sc->scsi_done = NULL;
1432 iscsi_complete_command(task); 1486 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1433reject: 1487reject:
1434 spin_unlock(&session->lock); 1488 spin_unlock(&session->lock);
1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1489 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
1439 1493
1440prepd_fault: 1494prepd_fault:
1441 sc->scsi_done = NULL; 1495 sc->scsi_done = NULL;
1442 iscsi_complete_command(task); 1496 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1443fault: 1497fault:
1444 spin_unlock(&session->lock); 1498 spin_unlock(&session->lock);
1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1499 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1608 * Fail commands. session lock held and recv side suspended and xmit 1662 * Fail commands. session lock held and recv side suspended and xmit
1609 * thread flushed 1663 * thread flushed
1610 */ 1664 */
1611static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1665static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1612 int error) 1666 int error)
1613{ 1667{
1614 struct iscsi_task *task, *tmp; 1668 struct iscsi_task *task;
1615 1669 int i;
1616 if (conn->task) {
1617 if (lun == -1 ||
1618 (conn->task->sc && conn->task->sc->device->lun == lun))
1619 conn->task = NULL;
1620 }
1621 1670
1622 /* flush pending */ 1671 for (i = 0; i < conn->session->cmds_max; i++) {
1623 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1672 task = conn->session->cmds[i];
1624 if (lun == task->sc->device->lun || lun == -1) { 1673 if (!task->sc || task->state == ISCSI_TASK_FREE)
1625 ISCSI_DBG_SESSION(conn->session, 1674 continue;
1626 "failing pending sc %p itt 0x%x\n",
1627 task->sc, task->itt);
1628 fail_command(conn, task, error << 16);
1629 }
1630 }
1631 1675
1632 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1676 if (lun != -1 && lun != task->sc->device->lun)
1633 if (lun == task->sc->device->lun || lun == -1) { 1677 continue;
1634 ISCSI_DBG_SESSION(conn->session,
1635 "failing requeued sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1637 fail_command(conn, task, error << 16);
1638 }
1639 }
1640 1678
1641 /* fail all other running */ 1679 ISCSI_DBG_SESSION(conn->session,
1642 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1680 "failing sc %p itt 0x%x state %d\n",
1643 if (lun == task->sc->device->lun || lun == -1) { 1681 task->sc, task->itt, task->state);
1644 ISCSI_DBG_SESSION(conn->session, 1682 fail_scsi_task(task, error);
1645 "failing in progress sc %p itt 0x%x\n",
1646 task->sc, task->itt);
1647 fail_command(conn, task, error << 16);
1648 }
1649 } 1683 }
1650} 1684}
1651 1685
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1655 struct iscsi_host *ihost = shost_priv(shost); 1689 struct iscsi_host *ihost = shost_priv(shost);
1656 1690
1657 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1691 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1658 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1692 if (ihost->workq)
1659 flush_workqueue(ihost->workq); 1693 flush_workqueue(ihost->workq);
1660} 1694}
1661EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1695EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1663static void iscsi_start_tx(struct iscsi_conn *conn) 1697static void iscsi_start_tx(struct iscsi_conn *conn)
1664{ 1698{
1665 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1699 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1666 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1700 iscsi_conn_queue_work(conn);
1667 iscsi_conn_queue_work(conn); 1701}
1702
1703/*
1704 * We want to make sure a ping is in flight. It has timed out.
1705 * And we are not busy processing a pdu that is making
1706 * progress but got started before the ping and is taking a while
1707 * to complete so the ping is just stuck behind it in a queue.
1708 */
1709static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1710{
1711 if (conn->ping_task &&
1712 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1713 (conn->ping_timeout * HZ), jiffies))
1714 return 1;
1715 else
1716 return 0;
1668} 1717}
1669 1718
1670static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1719static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1702 * if the ping timedout then we are in the middle of cleaning up 1751 * if the ping timedout then we are in the middle of cleaning up
1703 * and can let the iscsi eh handle it 1752 * and can let the iscsi eh handle it
1704 */ 1753 */
1705 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1754 if (iscsi_has_ping_timed_out(conn)) {
1706 (conn->ping_timeout * HZ), jiffies))
1707 rc = BLK_EH_RESET_TIMER; 1755 rc = BLK_EH_RESET_TIMER;
1756 goto done;
1757 }
1708 /* 1758 /*
1709 * if we are about to check the transport then give the command 1759 * if we are about to check the transport then give the command
1710 * more time 1760 * more time
1711 */ 1761 */
1712 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1762 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1713 jiffies)) 1763 jiffies)) {
1714 rc = BLK_EH_RESET_TIMER; 1764 rc = BLK_EH_RESET_TIMER;
1765 goto done;
1766 }
1767
1715 /* if in the middle of checking the transport then give us more time */ 1768 /* if in the middle of checking the transport then give us more time */
1716 if (conn->ping_task) 1769 if (conn->ping_task)
1717 rc = BLK_EH_RESET_TIMER; 1770 rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1738 1791
1739 recv_timeout *= HZ; 1792 recv_timeout *= HZ;
1740 last_recv = conn->last_recv; 1793 last_recv = conn->last_recv;
1741 if (conn->ping_task && 1794
1742 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1795 if (iscsi_has_ping_timed_out(conn)) {
1743 jiffies)) {
1744 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1796 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1745 "expired, last rx %lu, last ping %lu, " 1797 "expired, recv timeout %d, last rx %lu, "
1746 "now %lu\n", conn->ping_timeout, last_recv, 1798 "last ping %lu, now %lu\n",
1747 conn->last_ping, jiffies); 1799 conn->ping_timeout, conn->recv_timeout,
1800 last_recv, conn->last_ping, jiffies);
1748 spin_unlock(&session->lock); 1801 spin_unlock(&session->lock);
1749 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1802 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1750 return; 1803 return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1788 cls_session = starget_to_session(scsi_target(sc->device)); 1841 cls_session = starget_to_session(scsi_target(sc->device));
1789 session = cls_session->dd_data; 1842 session = cls_session->dd_data;
1790 1843
1844 ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
1845
1791 mutex_lock(&session->eh_mutex); 1846 mutex_lock(&session->eh_mutex);
1792 spin_lock_bh(&session->lock); 1847 spin_lock_bh(&session->lock);
1793 /* 1848 /*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1810 sc->SCp.phase != session->age) { 1865 sc->SCp.phase != session->age) {
1811 spin_unlock_bh(&session->lock); 1866 spin_unlock_bh(&session->lock);
1812 mutex_unlock(&session->eh_mutex); 1867 mutex_unlock(&session->eh_mutex);
1868 ISCSI_DBG_SESSION(session, "failing abort due to dropped "
1869 "session.\n");
1813 return FAILED; 1870 return FAILED;
1814 } 1871 }
1815 1872
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1829 } 1886 }
1830 1887
1831 if (task->state == ISCSI_TASK_PENDING) { 1888 if (task->state == ISCSI_TASK_PENDING) {
1832 fail_command(conn, task, DID_ABORT << 16); 1889 fail_scsi_task(task, DID_ABORT);
1833 goto success; 1890 goto success;
1834 } 1891 }
1835 1892
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1860 * then sent more data for the cmd. 1917 * then sent more data for the cmd.
1861 */ 1918 */
1862 spin_lock(&session->lock); 1919 spin_lock(&session->lock);
1863 fail_command(conn, task, DID_ABORT << 16); 1920 fail_scsi_task(task, DID_ABORT);
1864 conn->tmf_state = TMF_INITIAL; 1921 conn->tmf_state = TMF_INITIAL;
1865 spin_unlock(&session->lock); 1922 spin_unlock(&session->lock);
1866 iscsi_start_tx(conn); 1923 iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1967 iscsi_suspend_tx(conn); 2024 iscsi_suspend_tx(conn);
1968 2025
1969 spin_lock_bh(&session->lock); 2026 spin_lock_bh(&session->lock);
1970 fail_all_commands(conn, sc->device->lun, DID_ERROR); 2027 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
1971 conn->tmf_state = TMF_INITIAL; 2028 conn->tmf_state = TMF_INITIAL;
1972 spin_unlock_bh(&session->lock); 2029 spin_unlock_bh(&session->lock);
1973 2030
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2274 if (cmd_task_size) 2331 if (cmd_task_size)
2275 task->dd_data = &task[1]; 2332 task->dd_data = &task[1];
2276 task->itt = cmd_i; 2333 task->itt = cmd_i;
2334 task->state = ISCSI_TASK_FREE;
2277 INIT_LIST_HEAD(&task->running); 2335 INIT_LIST_HEAD(&task->running);
2278 } 2336 }
2279 2337
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2360 conn->transport_timer.data = (unsigned long)conn; 2418 conn->transport_timer.data = (unsigned long)conn;
2361 conn->transport_timer.function = iscsi_check_transport_timeouts; 2419 conn->transport_timer.function = iscsi_check_transport_timeouts;
2362 2420
2363 INIT_LIST_HEAD(&conn->run_list);
2364 INIT_LIST_HEAD(&conn->mgmt_run_list);
2365 INIT_LIST_HEAD(&conn->mgmtqueue); 2421 INIT_LIST_HEAD(&conn->mgmtqueue);
2366 INIT_LIST_HEAD(&conn->xmitqueue); 2422 INIT_LIST_HEAD(&conn->cmdqueue);
2367 INIT_LIST_HEAD(&conn->requeue); 2423 INIT_LIST_HEAD(&conn->requeue);
2368 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2424 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2369 2425
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2531EXPORT_SYMBOL_GPL(iscsi_conn_start); 2587EXPORT_SYMBOL_GPL(iscsi_conn_start);
2532 2588
2533static void 2589static void
2534flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2590fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
2535{ 2591{
2536 struct iscsi_task *task, *tmp; 2592 struct iscsi_task *task;
2593 int i, state;
2537 2594
2538 /* handle pending */ 2595 for (i = 0; i < conn->session->cmds_max; i++) {
2539 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2596 task = conn->session->cmds[i];
2540 ISCSI_DBG_SESSION(session, "flushing pending mgmt task " 2597 if (task->sc)
2541 "itt 0x%x\n", task->itt); 2598 continue;
2542 /* release ref from prep task */
2543 __iscsi_put_task(task);
2544 }
2545 2599
2546 /* handle running */ 2600 if (task->state == ISCSI_TASK_FREE)
2547 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2601 continue;
2548 ISCSI_DBG_SESSION(session, "flushing running mgmt task " 2602
2549 "itt 0x%x\n", task->itt); 2603 ISCSI_DBG_SESSION(conn->session,
2550 /* release ref from prep task */ 2604 "failing mgmt itt 0x%x state %d\n",
2551 __iscsi_put_task(task); 2605 task->itt, task->state);
2552 } 2606 state = ISCSI_TASK_ABRT_SESS_RECOV;
2607 if (task->state == ISCSI_TASK_PENDING)
2608 state = ISCSI_TASK_COMPLETED;
2609 iscsi_complete_task(task, state);
2553 2610
2554 conn->task = NULL; 2611 }
2555} 2612}
2556 2613
2557static void iscsi_start_session_recovery(struct iscsi_session *session, 2614static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2559{ 2616{
2560 int old_stop_stage; 2617 int old_stop_stage;
2561 2618
2562 del_timer_sync(&conn->transport_timer);
2563
2564 mutex_lock(&session->eh_mutex); 2619 mutex_lock(&session->eh_mutex);
2565 spin_lock_bh(&session->lock); 2620 spin_lock_bh(&session->lock);
2566 if (conn->stop_stage == STOP_CONN_TERM) { 2621 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2578 session->state = ISCSI_STATE_TERMINATE; 2633 session->state = ISCSI_STATE_TERMINATE;
2579 else if (conn->stop_stage != STOP_CONN_RECOVER) 2634 else if (conn->stop_stage != STOP_CONN_RECOVER)
2580 session->state = ISCSI_STATE_IN_RECOVERY; 2635 session->state = ISCSI_STATE_IN_RECOVERY;
2636 spin_unlock_bh(&session->lock);
2637
2638 del_timer_sync(&conn->transport_timer);
2639 iscsi_suspend_tx(conn);
2581 2640
2641 spin_lock_bh(&session->lock);
2582 old_stop_stage = conn->stop_stage; 2642 old_stop_stage = conn->stop_stage;
2583 conn->stop_stage = flag; 2643 conn->stop_stage = flag;
2584 conn->c_stage = ISCSI_CONN_STOPPED; 2644 conn->c_stage = ISCSI_CONN_STOPPED;
2585 spin_unlock_bh(&session->lock); 2645 spin_unlock_bh(&session->lock);
2586 2646
2587 iscsi_suspend_tx(conn);
2588 /* 2647 /*
2589 * for connection level recovery we should not calculate 2648 * for connection level recovery we should not calculate
2590 * header digest. conn->hdr_size used for optimization 2649 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2605 * flush queues. 2664 * flush queues.
2606 */ 2665 */
2607 spin_lock_bh(&session->lock); 2666 spin_lock_bh(&session->lock);
2608 if (flag == STOP_CONN_RECOVER) 2667 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2609 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2668 fail_mgmt_tasks(session, conn);
2610 else
2611 fail_all_commands(conn, -1, DID_ERROR);
2612 flush_control_queues(session, conn);
2613 spin_unlock_bh(&session->lock); 2669 spin_unlock_bh(&session->lock);
2614 mutex_unlock(&session->eh_mutex); 2670 mutex_unlock(&session->eh_mutex);
2615} 2671}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2651} 2707}
2652EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2708EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2653 2709
2710static int iscsi_switch_str_param(char **param, char *new_val_buf)
2711{
2712 char *new_val;
2713
2714 if (*param) {
2715 if (!strcmp(*param, new_val_buf))
2716 return 0;
2717 }
2718
2719 new_val = kstrdup(new_val_buf, GFP_NOIO);
2720 if (!new_val)
2721 return -ENOMEM;
2722
2723 kfree(*param);
2724 *param = new_val;
2725 return 0;
2726}
2654 2727
2655int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2728int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2656 enum iscsi_param param, char *buf, int buflen) 2729 enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2723 sscanf(buf, "%u", &conn->exp_statsn); 2796 sscanf(buf, "%u", &conn->exp_statsn);
2724 break; 2797 break;
2725 case ISCSI_PARAM_USERNAME: 2798 case ISCSI_PARAM_USERNAME:
2726 kfree(session->username); 2799 return iscsi_switch_str_param(&session->username, buf);
2727 session->username = kstrdup(buf, GFP_KERNEL);
2728 if (!session->username)
2729 return -ENOMEM;
2730 break;
2731 case ISCSI_PARAM_USERNAME_IN: 2800 case ISCSI_PARAM_USERNAME_IN:
2732 kfree(session->username_in); 2801 return iscsi_switch_str_param(&session->username_in, buf);
2733 session->username_in = kstrdup(buf, GFP_KERNEL);
2734 if (!session->username_in)
2735 return -ENOMEM;
2736 break;
2737 case ISCSI_PARAM_PASSWORD: 2802 case ISCSI_PARAM_PASSWORD:
2738 kfree(session->password); 2803 return iscsi_switch_str_param(&session->password, buf);
2739 session->password = kstrdup(buf, GFP_KERNEL);
2740 if (!session->password)
2741 return -ENOMEM;
2742 break;
2743 case ISCSI_PARAM_PASSWORD_IN: 2804 case ISCSI_PARAM_PASSWORD_IN:
2744 kfree(session->password_in); 2805 return iscsi_switch_str_param(&session->password_in, buf);
2745 session->password_in = kstrdup(buf, GFP_KERNEL);
2746 if (!session->password_in)
2747 return -ENOMEM;
2748 break;
2749 case ISCSI_PARAM_TARGET_NAME: 2806 case ISCSI_PARAM_TARGET_NAME:
2750 /* this should not change between logins */ 2807 return iscsi_switch_str_param(&session->targetname, buf);
2751 if (session->targetname)
2752 break;
2753
2754 session->targetname = kstrdup(buf, GFP_KERNEL);
2755 if (!session->targetname)
2756 return -ENOMEM;
2757 break;
2758 case ISCSI_PARAM_TPGT: 2808 case ISCSI_PARAM_TPGT:
2759 sscanf(buf, "%d", &session->tpgt); 2809 sscanf(buf, "%d", &session->tpgt);
2760 break; 2810 break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2762 sscanf(buf, "%d", &conn->persistent_port); 2812 sscanf(buf, "%d", &conn->persistent_port);
2763 break; 2813 break;
2764 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2814 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2765 /* 2815 return iscsi_switch_str_param(&conn->persistent_address, buf);
2766 * this is the address returned in discovery so it should
2767 * not change between logins.
2768 */
2769 if (conn->persistent_address)
2770 break;
2771
2772 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2773 if (!conn->persistent_address)
2774 return -ENOMEM;
2775 break;
2776 case ISCSI_PARAM_IFACE_NAME: 2816 case ISCSI_PARAM_IFACE_NAME:
2777 if (!session->ifacename) 2817 return iscsi_switch_str_param(&session->ifacename, buf);
2778 session->ifacename = kstrdup(buf, GFP_KERNEL);
2779 break;
2780 case ISCSI_PARAM_INITIATOR_NAME: 2818 case ISCSI_PARAM_INITIATOR_NAME:
2781 if (!session->initiatorname) 2819 return iscsi_switch_str_param(&session->initiatorname, buf);
2782 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2783 break;
2784 default: 2820 default:
2785 return -ENOSYS; 2821 return -ENOSYS;
2786 } 2822 }
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2851 len = sprintf(buf, "%s\n", session->ifacename); 2887 len = sprintf(buf, "%s\n", session->ifacename);
2852 break; 2888 break;
2853 case ISCSI_PARAM_INITIATOR_NAME: 2889 case ISCSI_PARAM_INITIATOR_NAME:
2854 if (!session->initiatorname) 2890 len = sprintf(buf, "%s\n", session->initiatorname);
2855 len = sprintf(buf, "%s\n", "unknown");
2856 else
2857 len = sprintf(buf, "%s\n", session->initiatorname);
2858 break; 2891 break;
2859 default: 2892 default:
2860 return -ENOSYS; 2893 return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2920 2953
2921 switch (param) { 2954 switch (param) {
2922 case ISCSI_HOST_PARAM_NETDEV_NAME: 2955 case ISCSI_HOST_PARAM_NETDEV_NAME:
2923 if (!ihost->netdev) 2956 len = sprintf(buf, "%s\n", ihost->netdev);
2924 len = sprintf(buf, "%s\n", "default");
2925 else
2926 len = sprintf(buf, "%s\n", ihost->netdev);
2927 break; 2957 break;
2928 case ISCSI_HOST_PARAM_HWADDRESS: 2958 case ISCSI_HOST_PARAM_HWADDRESS:
2929 if (!ihost->hwaddress) 2959 len = sprintf(buf, "%s\n", ihost->hwaddress);
2930 len = sprintf(buf, "%s\n", "default");
2931 else
2932 len = sprintf(buf, "%s\n", ihost->hwaddress);
2933 break; 2960 break;
2934 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2961 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2935 if (!ihost->initiatorname) 2962 len = sprintf(buf, "%s\n", ihost->initiatorname);
2936 len = sprintf(buf, "%s\n", "unknown");
2937 else
2938 len = sprintf(buf, "%s\n", ihost->initiatorname);
2939 break; 2963 break;
2940 case ISCSI_HOST_PARAM_IPADDRESS: 2964 case ISCSI_HOST_PARAM_IPADDRESS:
2941 if (!strlen(ihost->local_address)) 2965 len = sprintf(buf, "%s\n", ihost->local_address);
2942 len = sprintf(buf, "%s\n", "unknown");
2943 else
2944 len = sprintf(buf, "%s\n",
2945 ihost->local_address);
2946 break; 2966 break;
2947 default: 2967 default:
2948 return -ENOSYS; 2968 return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2959 2979
2960 switch (param) { 2980 switch (param) {
2961 case ISCSI_HOST_PARAM_NETDEV_NAME: 2981 case ISCSI_HOST_PARAM_NETDEV_NAME:
2962 if (!ihost->netdev) 2982 return iscsi_switch_str_param(&ihost->netdev, buf);
2963 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2964 break;
2965 case ISCSI_HOST_PARAM_HWADDRESS: 2983 case ISCSI_HOST_PARAM_HWADDRESS:
2966 if (!ihost->hwaddress) 2984 return iscsi_switch_str_param(&ihost->hwaddress, buf);
2967 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2968 break;
2969 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2985 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2970 if (!ihost->initiatorname) 2986 return iscsi_switch_str_param(&ihost->initiatorname, buf);
2971 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2972 break;
2973 default: 2987 default:
2974 return -ENOSYS; 2988 return -ENOSYS;
2975 } 2989 }
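
The libiscsi changes above collapse the repeated kfree()/kstrdup() sequences in iscsi_set_param() and iscsi_host_set_param() into the new iscsi_switch_str_param() helper, which leaves the existing buffer alone when the incoming value is identical and switches the allocation from GFP_KERNEL to GFP_NOIO. As a side effect, parameters such as TARGET_NAME and PERSISTENT_ADDRESS, which previously refused to overwrite an existing value, can now be replaced on a later login. A minimal userspace model of the helper's semantics (strdup()/free() stand in for kstrdup()/kfree(); the names below are illustrative, only the behaviour mirrors the helper shown above):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace model of iscsi_switch_str_param(): replace *param with a copy of
 * new_val, but keep the current buffer when the value is unchanged. */
static int switch_str_param(char **param, const char *new_val)
{
	char *copy;

	if (*param && !strcmp(*param, new_val))
		return 0;		/* same value: nothing to do */

	copy = strdup(new_val);		/* kernel code uses kstrdup(..., GFP_NOIO) */
	if (!copy)
		return -ENOMEM;		/* old value is left intact on failure */

	free(*param);			/* free(NULL) is a no-op, like kfree() */
	*param = copy;
	return 0;
}

int main(void)
{
	char *username = NULL;

	switch_str_param(&username, "initiator-a");	/* first login: allocate */
	switch_str_param(&username, "initiator-a");	/* relogin, same value: no-op */
	switch_str_param(&username, "initiator-b");	/* changed value: replace */
	printf("%s\n", username);
	free(username);
	return 0;
}

Built as an ordinary C program this prints "initiator-b"; note that the old copy is freed only after the new duplicate exists, so, as in the kernel helper, a failed allocation leaves the previous setting untouched.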
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f483..2bc07090321 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
440 struct iscsi_tcp_task *tcp_task = task->dd_data; 440 struct iscsi_tcp_task *tcp_task = task->dd_data;
441 struct iscsi_r2t_info *r2t; 441 struct iscsi_r2t_info *r2t;
442 442
443 /* nothing to do for mgmt or pending tasks */ 443 /* nothing to do for mgmt */
444 if (!task->sc || task->state == ISCSI_TASK_PENDING) 444 if (!task->sc)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
473 int datasn = be32_to_cpu(rhdr->datasn); 473 int datasn = be32_to_cpu(rhdr->datasn);
474 unsigned total_in_length = scsi_in(task->sc)->length; 474 unsigned total_in_length = scsi_in(task->sc)->length;
475 475
476 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); 476 /*
477 * lib iscsi will update this in the completion handling if there
478 * is status.
479 */
480 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
481 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
482
477 if (tcp_conn->in.datalen == 0) 483 if (tcp_conn->in.datalen == 0)
478 return 0; 484 return 0;
479 485
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
857 int rc = 0; 863 int rc = 0;
858 864
859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); 865 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
866 /*
867 * Update for each skb instead of pdu, because over slow networks a
868 * data_in's data could take a while to read in. We also want to
869 * account for r2ts.
870 */
871 conn->last_recv = jiffies;
860 872
861 if (unlikely(conn->suspend_rx)) { 873 if (unlikely(conn->suspend_rx)) {
862 ISCSI_DBG_TCP(conn, "Rx suspended!\n"); 874 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e43678..54fa1e42dc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48..1bc3b756799 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
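
The two libsas files above stop reading and writing struct request's data_len directly: total frame sizes are taken from blk_rq_bytes(), and the untransferred remainder is reported through req->resid_len / rsp->resid_len (for example, a positive return from smp_execute_task(), the untransferred residual, now lands in rsp->resid_len rather than overwriting rsp->data_len). A small standalone sketch of that residual convention, using illustrative stand-in types rather than the real block-layer structures:

#include <stdio.h>

/* Stand-in for a request whose total size is known up front (cf.
 * blk_rq_bytes()) and whose untransferred remainder is tracked separately
 * (cf. ->resid_len), instead of reusing the size field as a scratch counter. */
struct rq_model {
	unsigned int bytes;		/* total size of the request */
	unsigned int resid_len;		/* bytes left untransferred */
};

static void complete_rq(struct rq_model *rq, unsigned int transferred)
{
	rq->resid_len = transferred < rq->bytes ? rq->bytes - transferred : 0;
}

int main(void)
{
	struct rq_model rsp = { .bytes = 128, .resid_len = 0 };

	/* e.g. the SMP layer filled in only 72 of the 128 response bytes,
	 * leaving a residual of 56. */
	complete_rq(&rsp, 72);
	printf("filled %u, residual %u\n", rsp.bytes - rsp.resid_len,
	       rsp.resid_len);
	return 0;
}

Because the residual lives in its own field, the original request size stays available to later code, which is what lets the patch delete the local resp_data_len bookkeeping in sas_host_smp.c.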
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111b..54056984909 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
23 23
24struct lpfc_sli2_slim; 24struct lpfc_sli2_slim;
25 25
26#define LPFC_PCI_DEV_LP 0x1
27#define LPFC_PCI_DEV_OC 0x2
28
29#define LPFC_SLI_REV2 2
30#define LPFC_SLI_REV3 3
31#define LPFC_SLI_REV4 4
32
26#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ 33#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 34#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
28 requests */ 35 requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
98}; 105};
99 106
100struct hbq_dmabuf { 107struct hbq_dmabuf {
108 struct lpfc_dmabuf hbuf;
101 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
102 uint32_t size; 110 uint32_t size;
103 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe;
104}; 113};
105 114
106/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
134 } rev; 143 } rev;
135 struct { 144 struct {
136#ifdef __BIG_ENDIAN_BITFIELD 145#ifdef __BIG_ENDIAN_BITFIELD
137 uint32_t rsvd2 :24; /* Reserved */ 146 uint32_t rsvd3 :19; /* Reserved */
147 uint32_t cdss : 1; /* Configure Data Security SLI */
148 uint32_t rsvd2 : 3; /* Reserved */
149 uint32_t cbg : 1; /* Configure BlockGuard */
138 uint32_t cmv : 1; /* Configure Max VPIs */ 150 uint32_t cmv : 1; /* Configure Max VPIs */
139 uint32_t ccrp : 1; /* Config Command Ring Polling */ 151 uint32_t ccrp : 1; /* Config Command Ring Polling */
140 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 164 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
153 uint32_t ccrp : 1; /* Config Command Ring Polling */ 165 uint32_t ccrp : 1; /* Config Command Ring Polling */
154 uint32_t cmv : 1; /* Configure Max VPIs */ 166 uint32_t cmv : 1; /* Configure Max VPIs */
155 uint32_t rsvd2 :24; /* Reserved */ 167 uint32_t cbg : 1; /* Configure BlockGuard */
168 uint32_t rsvd2 : 3; /* Reserved */
169 uint32_t cdss : 1; /* Configure Data Security SLI */
170 uint32_t rsvd3 :19; /* Reserved */
156#endif 171#endif
157 } sli3Feat; 172 } sli3Feat;
158} lpfc_vpd_t; 173} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
264}; 279};
265 280
266struct lpfc_vport { 281struct lpfc_vport {
267 struct list_head listentry;
268 struct lpfc_hba *phba; 282 struct lpfc_hba *phba;
283 struct list_head listentry;
269 uint8_t port_type; 284 uint8_t port_type;
270#define LPFC_PHYSICAL_PORT 1 285#define LPFC_PHYSICAL_PORT 1
271#define LPFC_NPIV_PORT 2 286#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
273 enum discovery_state port_state; 288 enum discovery_state port_state;
274 289
275 uint16_t vpi; 290 uint16_t vpi;
291 uint16_t vfi;
292 uint8_t vfi_state;
293#define LPFC_VFI_REGISTERED 0x1
276 294
277 uint32_t fc_flag; /* FC flags */ 295 uint32_t fc_flag; /* FC flags */
278/* Several of these flags are HBA centric and should be moved to 296/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
385#endif 403#endif
386 uint8_t stat_data_enabled; 404 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked; 405 uint8_t stat_data_blocked;
406 struct list_head rcv_buffer_list;
407 uint32_t vport_flag;
408#define STATIC_VPORT 1
388}; 409};
389 410
390struct hbq_s { 411struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
420}; 441};
421 442
422struct lpfc_hba { 443struct lpfc_hba {
444 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf)
446 (struct lpfc_vport *, int);
447 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
448 (struct lpfc_hba *);
449 int (*lpfc_scsi_prep_dma_buf)
450 (struct lpfc_hba *, struct lpfc_scsi_buf *);
451 void (*lpfc_scsi_unprep_dma_buf)
452 (struct lpfc_hba *, struct lpfc_scsi_buf *);
453 void (*lpfc_release_scsi_buf)
454 (struct lpfc_hba *, struct lpfc_scsi_buf *);
455 void (*lpfc_rampdown_queue_depth)
456 (struct lpfc_hba *);
457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t,
467 struct lpfc_iocbq *, uint32_t);
468 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
469 struct lpfc_iocbq *);
470 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
471
472
473 IOCB_t * (*lpfc_get_iocb_from_iocbq)
474 (struct lpfc_iocbq *);
475 void (*lpfc_scsi_cmd_iocb_cmpl)
476 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
477
478 /* MBOX interface function jump table entries */
479 int (*lpfc_sli_issue_mbox)
480 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
481 /* Slow-path IOCB process function jump table entries */
482 void (*lpfc_sli_handle_slow_ring_event)
483 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
484 uint32_t mask);
485 /* INIT device interface function jump table entries */
486 int (*lpfc_sli_hbq_to_firmware)
487 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
488 int (*lpfc_sli_brdrestart)
489 (struct lpfc_hba *);
490 int (*lpfc_sli_brdready)
491 (struct lpfc_hba *, uint32_t);
492 void (*lpfc_handle_eratt)
493 (struct lpfc_hba *);
494 void (*lpfc_stop_port)
495 (struct lpfc_hba *);
496
497
498 /* SLI4 specific HBA data structure */
499 struct lpfc_sli4_hba sli4_hba;
500
423 struct lpfc_sli sli; 501 struct lpfc_sli sli;
424 uint32_t sli_rev; /* SLI2 or SLI3 */ 502 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
503 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
425 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 504 uint32_t sli3_options; /* Mask of enabled SLI3 options */
426#define LPFC_SLI3_HBQ_ENABLED 0x01 505#define LPFC_SLI3_HBQ_ENABLED 0x01
427#define LPFC_SLI3_NPIV_ENABLED 0x02 506#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
429#define LPFC_SLI3_CRP_ENABLED 0x08 508#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 509#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20 510#define LPFC_SLI3_BG_ENABLED 0x20
511#define LPFC_SLI3_DSS_ENABLED 0x40
432 uint32_t iocb_cmd_size; 512 uint32_t iocb_cmd_size;
433 uint32_t iocb_rsp_size; 513 uint32_t iocb_rsp_size;
434 514
@@ -442,8 +522,13 @@ struct lpfc_hba {
442 522
443 uint32_t hba_flag; /* hba generic flags */ 523 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 524#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 525#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ 526#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
527#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
528#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
529#define FCP_XRI_ABORT_EVENT 0x20
530#define ELS_XRI_ABORT_EVENT 0x40
531#define ASYNC_EVENT 0x80
447 struct lpfc_dmabuf slim2p; 532 struct lpfc_dmabuf slim2p;
448 533
449 MAILBOX_t *mbox; 534 MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
502 uint32_t cfg_poll; 587 uint32_t cfg_poll;
503 uint32_t cfg_poll_tmo; 588 uint32_t cfg_poll_tmo;
504 uint32_t cfg_use_msi; 589 uint32_t cfg_use_msi;
590 uint32_t cfg_fcp_imax;
591 uint32_t cfg_fcp_wq_count;
592 uint32_t cfg_fcp_eq_count;
505 uint32_t cfg_sg_seg_cnt; 593 uint32_t cfg_sg_seg_cnt;
506 uint32_t cfg_prot_sg_seg_cnt; 594 uint32_t cfg_prot_sg_seg_cnt;
507 uint32_t cfg_sg_dma_buf_size; 595 uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
511 uint32_t cfg_enable_hba_reset; 599 uint32_t cfg_enable_hba_reset;
512 uint32_t cfg_enable_hba_heartbeat; 600 uint32_t cfg_enable_hba_heartbeat;
513 uint32_t cfg_enable_bg; 601 uint32_t cfg_enable_bg;
602 uint32_t cfg_enable_fip;
603 uint32_t cfg_log_verbose;
514 604
515 lpfc_vpd_t vpd; /* vital product data */ 605 lpfc_vpd_t vpd; /* vital product data */
516 606
@@ -526,11 +616,12 @@ struct lpfc_hba {
526 unsigned long data_flags; 616 unsigned long data_flags;
527 617
528 uint32_t hbq_in_use; /* HBQs in use flag */ 618 uint32_t hbq_in_use; /* HBQs in use flag */
529 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 619 struct list_head rb_pend_list; /* Received buffers to be processed */
530 uint32_t hbq_count; /* Count of configured HBQs */ 620 uint32_t hbq_count; /* Count of configured HBQs */
531 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 621 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
532 622
533 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 623 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
624 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
534 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 625 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
535 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 626 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
536 PCI BAR0 */ 627 PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
593 /* pci_mem_pools */ 684 /* pci_mem_pools */
594 struct pci_pool *lpfc_scsi_dma_buf_pool; 685 struct pci_pool *lpfc_scsi_dma_buf_pool;
595 struct pci_pool *lpfc_mbuf_pool; 686 struct pci_pool *lpfc_mbuf_pool;
596 struct pci_pool *lpfc_hbq_pool; 687 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
688 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
597 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 689 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
598 690
599 mempool_t *mbox_mem_pool; 691 mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
609 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 701 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
610 uint16_t max_vpi; /* Maximum virtual nports */ 702 uint16_t max_vpi; /* Maximum virtual nports */
611#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 703#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
704 uint16_t max_vports; /*
705 * For IOV HBAs max_vpi can change
706 * after a reset. max_vports is max
707 * number of vports present. This can
708 * be greater than max_vpi.
709 */
710 uint16_t vpi_base;
711 uint16_t vfi_base;
612 unsigned long *vpi_bmask; /* vpi allocation table */ 712 unsigned long *vpi_bmask; /* vpi allocation table */
613 713
614 /* Data structure used by fabric iocb scheduler */ 714 /* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
667/* Maximum number of events that can be outstanding at any time*/ 767/* Maximum number of events that can be outstanding at any time*/
668#define LPFC_MAX_EVT_COUNT 512 768#define LPFC_MAX_EVT_COUNT 512
669 atomic_t fast_event_count; 769 atomic_t fast_event_count;
770 struct lpfc_fcf fcf;
771 uint8_t fc_map[3];
772 uint8_t valid_vlan;
773 uint16_t vlan_id;
774 struct list_head fcf_conn_rec_list;
670}; 775};
671 776
672static inline struct Scsi_Host * 777static inline struct Scsi_Host *
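
The most significant structural change to lpfc.h is the jump table added at the top of struct lpfc_hba: SCSI buffer management, IOCB issue/release, mailbox submission, slow-ring handling, error attention and port stop are now reached through per-adapter function pointers, so common driver code can run unchanged on either the existing SLI-3 register interface or the new SLI-4 queue interface selected at probe time (pci_dev_grp, sli_rev). A minimal sketch of that dispatch pattern, with hypothetical names standing in for the real lpfc entry points:

#include <stdio.h>

/* Hypothetical, stripped-down version of the jump-table idea: core code calls
 * through hba->ops; probe fills ops in once the interface revision is known. */
struct hba;

struct hba_ops {
	int  (*issue_cmd)(struct hba *, int tag);
	void (*handle_eratt)(struct hba *);
};

struct hba {
	int sli_rev;
	struct hba_ops ops;
};

static int sli3_issue_cmd(struct hba *h, int tag)
{ printf("SLI-3: ring doorbell for tag %d\n", tag); return 0; }
static void sli3_handle_eratt(struct hba *h)
{ printf("SLI-3: read host status register, restart port\n"); }

static int sli4_issue_cmd(struct hba *h, int tag)
{ printf("SLI-4: post work queue entry for tag %d\n", tag); return 0; }
static void sli4_handle_eratt(struct hba *h)
{ printf("SLI-4: handle error attention via queue path\n"); }

/* Probe-time selection, analogous to populating the lpfc_hba jump table
 * once pci_dev_grp / sli_rev has been determined. */
static void hba_setup_ops(struct hba *h)
{
	if (h->sli_rev >= 4)
		h->ops = (struct hba_ops){ sli4_issue_cmd, sli4_handle_eratt };
	else
		h->ops = (struct hba_ops){ sli3_issue_cmd, sli3_handle_eratt };
}

int main(void)
{
	struct hba h = { .sli_rev = 4 };

	hba_setup_ops(&h);
	h.ops.issue_cmd(&h, 1);		/* common code, revision-specific behaviour */
	h.ops.handle_eratt(&h);
	return 0;
}

The point of the indirection is that hot-path callers never test sli_rev themselves; the revision decision is made once, when the table is filled in.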
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb12..d73e677201f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 37#include "lpfc_nl.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
505 return -ENOMEM; 507 return -ENOMEM;
506 508
507 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 509 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
508 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 510 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
509 pmboxq->mb.mbxOwner = OWN_HOST; 511 pmboxq->u.mb.mbxOwner = OWN_HOST;
510 512
511 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 513 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
512 514
513 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 515 if ((mbxstatus == MBX_SUCCESS) &&
516 (pmboxq->u.mb.mbxStatus == 0 ||
517 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
514 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 518 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
515 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 519 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
516 phba->cfg_link_speed); 520 phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
789 uint32_t *mrpi, uint32_t *arpi, 793 uint32_t *mrpi, uint32_t *arpi,
790 uint32_t *mvpi, uint32_t *avpi) 794 uint32_t *mvpi, uint32_t *avpi)
791{ 795{
792 struct lpfc_sli *psli = &phba->sli; 796 struct lpfc_sli *psli = &phba->sli;
797 struct lpfc_mbx_read_config *rd_config;
793 LPFC_MBOXQ_t *pmboxq; 798 LPFC_MBOXQ_t *pmboxq;
794 MAILBOX_t *pmb; 799 MAILBOX_t *pmb;
795 int rc = 0; 800 int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
800 */ 805 */
801 if (phba->link_state < LPFC_LINK_DOWN || 806 if (phba->link_state < LPFC_LINK_DOWN ||
802 !phba->mbox_mem_pool || 807 !phba->mbox_mem_pool ||
803 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 808 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
804 return 0; 809 return 0;
805 810
806 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 811 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
811 return 0; 816 return 0;
812 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 817 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
813 818
814 pmb = &pmboxq->mb; 819 pmb = &pmboxq->u.mb;
815 pmb->mbxCommand = MBX_READ_CONFIG; 820 pmb->mbxCommand = MBX_READ_CONFIG;
816 pmb->mbxOwner = OWN_HOST; 821 pmb->mbxOwner = OWN_HOST;
817 pmboxq->context1 = NULL; 822 pmboxq->context1 = NULL;
818 823
819 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 824 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
820 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 825 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
821 rc = MBX_NOT_FINISHED; 826 rc = MBX_NOT_FINISHED;
822 else 827 else
823 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 828 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
828 return 0; 833 return 0;
829 } 834 }
830 835
831 if (mrpi) 836 if (phba->sli_rev == LPFC_SLI_REV4) {
832 *mrpi = pmb->un.varRdConfig.max_rpi; 837 rd_config = &pmboxq->u.mqe.un.rd_config;
833 if (arpi) 838 if (mrpi)
834 *arpi = pmb->un.varRdConfig.avail_rpi; 839 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
835 if (mxri) 840 if (arpi)
836 *mxri = pmb->un.varRdConfig.max_xri; 841 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
837 if (axri) 842 phba->sli4_hba.max_cfg_param.rpi_used;
838 *axri = pmb->un.varRdConfig.avail_xri; 843 if (mxri)
839 if (mvpi) 844 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
840 *mvpi = pmb->un.varRdConfig.max_vpi; 845 if (axri)
841 if (avpi) 846 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
842 *avpi = pmb->un.varRdConfig.avail_vpi; 847 phba->sli4_hba.max_cfg_param.xri_used;
848 if (mvpi)
849 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
850 if (avpi)
851 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
852 phba->sli4_hba.max_cfg_param.vpi_used;
853 } else {
854 if (mrpi)
855 *mrpi = pmb->un.varRdConfig.max_rpi;
856 if (arpi)
857 *arpi = pmb->un.varRdConfig.avail_rpi;
858 if (mxri)
859 *mxri = pmb->un.varRdConfig.max_xri;
860 if (axri)
861 *axri = pmb->un.varRdConfig.avail_xri;
862 if (mvpi)
863 *mvpi = pmb->un.varRdConfig.max_vpi;
864 if (avpi)
865 *avpi = pmb->un.varRdConfig.avail_vpi;
866 }
843 867
844 mempool_free(pmboxq, phba->mbox_mem_pool); 868 mempool_free(pmboxq, phba->mbox_mem_pool);
845 return 1; 869 return 1;
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
2021# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 2045# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
2022# deluged with LOTS of information. 2046# deluged with LOTS of information.
2023# You can set a bit mask to record specific types of verbose messages: 2047# You can set a bit mask to record specific types of verbose messages:
2024# 2048# See lpfc_logmsg.h for definitions.
2025# LOG_ELS 0x1 ELS events
2026# LOG_DISCOVERY 0x2 Link discovery events
2027# LOG_MBOX 0x4 Mailbox events
2028# LOG_INIT 0x8 Initialization events
2029# LOG_LINK_EVENT 0x10 Link events
2030# LOG_FCP 0x40 FCP traffic history
2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockBuard events
2033# LOG_MISC 0x400 Miscellaneous events
2034# LOG_SLI 0x800 SLI events
2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
2036# LOG_LIBDFC 0x2000 LIBDFC events
2037# LOG_ALL_MSG 0xffff LOG all messages
2038*/ 2049*/
2039LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, 2050LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2040 "Verbose logging bit-mask"); 2051 "Verbose logging bit-mask");
2041 2052
2042/* 2053/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2277static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2267 lpfc_topology_show, lpfc_topology_store); 2278 lpfc_topology_show, lpfc_topology_store);
2268 2279
2280/**
2281 * lpfc_static_vport_show: Read callback function for
2282 * lpfc_static_vport sysfs file.
2283 * @dev: Pointer to class device object.
2284 * @attr: device attribute structure.
2285 * @buf: Data buffer.
2286 *
2287 * This function is the read callback function for
2288 * the lpfc_static_vport sysfs file. The lpfc_static_vport
2289 * sysfs file reports whether the vport is a static vport.
2290 **/
2291static ssize_t
2292lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 if (vport->vport_flag & STATIC_VPORT)
2298 sprintf(buf, "1\n");
2299 else
2300 sprintf(buf, "0\n");
2301
2302 return strlen(buf);
2303}
2304
2305/*
2306 * Sysfs attribute to control the statistical data collection.
2307 */
2308static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
2309 lpfc_static_vport_show, NULL);
2269 2310
2270/** 2311/**
2271 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 2312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2341 if (vports == NULL) 2382 if (vports == NULL)
2342 return -ENOMEM; 2383 return -ENOMEM;
2343 2384
2344 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2385 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2345 v_shost = lpfc_shost_from_vport(vports[i]); 2386 v_shost = lpfc_shost_from_vport(vports[i]);
2346 spin_lock_irq(v_shost->host_lock); 2387 spin_lock_irq(v_shost->host_lock);
2347 /* Block and reset data collection */ 2388 /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2356 phba->bucket_base = base; 2397 phba->bucket_base = base;
2357 phba->bucket_step = step; 2398 phba->bucket_step = step;
2358 2399
2359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2400 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2360 v_shost = lpfc_shost_from_vport(vports[i]); 2401 v_shost = lpfc_shost_from_vport(vports[i]);
2361 2402
2362 /* Unblock data collection */ 2403 /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2373 if (vports == NULL) 2414 if (vports == NULL)
2374 return -ENOMEM; 2415 return -ENOMEM;
2375 2416
2376 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2377 v_shost = lpfc_shost_from_vport(vports[i]); 2418 v_shost = lpfc_shost_from_vport(vports[i]);
2378 spin_lock_irq(shost->host_lock); 2419 spin_lock_irq(shost->host_lock);
2379 vports[i]->stat_data_blocked = 1; 2420 vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2844/* 2885/*
2845# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2886# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2846# support this feature 2887# support this feature
2847# 0 = MSI disabled 2888# 0 = MSI disabled (default)
2848# 1 = MSI enabled 2889# 1 = MSI enabled
2849# 2 = MSI-X enabled (default) 2890# 2 = MSI-X enabled
2850# Value range is [0,2]. Default value is 2. 2891# Value range is [0,2]. Default value is 0.
2851*/ 2892*/
2852LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 2893LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
2853 "MSI-X (2), if possible"); 2894 "MSI-X (2), if possible");
2854 2895
2855/* 2896/*
2897# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
2898#
2899# Value range is [636,651042]. Default value is 10000.
2900*/
2901LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
2902 "Set the maximum number of fast-path FCP interrupts per second");
2903
2904/*
2905# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
2906#
2907# Value range is [1,31]. Default value is 4.
2908*/
2909LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
2910 "Set the number of fast-path FCP work queues, if possible");
2911
2912/*
2913# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
2914#
2915# Value range is [1,7]. Default value is 1.
2916*/
2917LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
2918 "Set the number of fast-path FCP event queues, if possible");
2919
2920/*
2856# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 2921# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
2857# 0 = HBA resets disabled 2922# 0 = HBA resets disabled
2858# 1 = HBA resets enabled (default) 2923# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2876*/ 2941*/
2877LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 2942LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2878 2943
2944/*
2945# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2946# set, the driver will add an FCF record manually if the port has no
2947# FCF records available and start discovery.
2948# Value range is [0,1]. Default value is 1 (enabled)
2949*/
2950LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2951
2879 2952
2880/* 2953/*
2881# lpfc_prot_mask: i 2954# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2942 &dev_attr_lpfc_peer_port_login, 3015 &dev_attr_lpfc_peer_port_login,
2943 &dev_attr_lpfc_nodev_tmo, 3016 &dev_attr_lpfc_nodev_tmo,
2944 &dev_attr_lpfc_devloss_tmo, 3017 &dev_attr_lpfc_devloss_tmo,
3018 &dev_attr_lpfc_enable_fip,
2945 &dev_attr_lpfc_fcp_class, 3019 &dev_attr_lpfc_fcp_class,
2946 &dev_attr_lpfc_use_adisc, 3020 &dev_attr_lpfc_use_adisc,
2947 &dev_attr_lpfc_ack0, 3021 &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
2969 &dev_attr_lpfc_poll, 3043 &dev_attr_lpfc_poll,
2970 &dev_attr_lpfc_poll_tmo, 3044 &dev_attr_lpfc_poll_tmo,
2971 &dev_attr_lpfc_use_msi, 3045 &dev_attr_lpfc_use_msi,
3046 &dev_attr_lpfc_fcp_imax,
3047 &dev_attr_lpfc_fcp_wq_count,
3048 &dev_attr_lpfc_fcp_eq_count,
2972 &dev_attr_lpfc_enable_bg, 3049 &dev_attr_lpfc_enable_bg,
2973 &dev_attr_lpfc_soft_wwnn, 3050 &dev_attr_lpfc_soft_wwnn,
2974 &dev_attr_lpfc_soft_wwpn, 3051 &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
2991 &dev_attr_lpfc_lun_queue_depth, 3068 &dev_attr_lpfc_lun_queue_depth,
2992 &dev_attr_lpfc_nodev_tmo, 3069 &dev_attr_lpfc_nodev_tmo,
2993 &dev_attr_lpfc_devloss_tmo, 3070 &dev_attr_lpfc_devloss_tmo,
3071 &dev_attr_lpfc_enable_fip,
2994 &dev_attr_lpfc_hba_queue_depth, 3072 &dev_attr_lpfc_hba_queue_depth,
2995 &dev_attr_lpfc_peer_port_login, 3073 &dev_attr_lpfc_peer_port_login,
2996 &dev_attr_lpfc_restrict_login, 3074 &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3003 &dev_attr_lpfc_enable_da_id, 3081 &dev_attr_lpfc_enable_da_id,
3004 &dev_attr_lpfc_max_scsicmpl_time, 3082 &dev_attr_lpfc_max_scsicmpl_time,
3005 &dev_attr_lpfc_stat_data_ctrl, 3083 &dev_attr_lpfc_stat_data_ctrl,
3084 &dev_attr_lpfc_static_vport,
3006 NULL, 3085 NULL,
3007}; 3086};
3008 3087
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3199 } 3278 }
3200 } 3279 }
3201 3280
3202 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, 3281 memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
3203 buf, count); 3282 buf, count);
3204 3283
3205 phba->sysfs_mbox.offset = off + count; 3284 phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3320 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3242 struct lpfc_hba *phba = vport->phba; 3321 struct lpfc_hba *phba = vport->phba;
3243 int rc; 3322 int rc;
3323 MAILBOX_t *pmb;
3244 3324
3245 if (off > MAILBOX_CMD_SIZE) 3325 if (off > MAILBOX_CMD_SIZE)
3246 return -ERANGE; 3326 return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3265 if (off == 0 && 3345 if (off == 0 &&
3266 phba->sysfs_mbox.state == SMBOX_WRITING && 3346 phba->sysfs_mbox.state == SMBOX_WRITING &&
3267 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 3347 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
3268 3348 pmb = &phba->sysfs_mbox.mbox->u.mb;
3269 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 3349 switch (pmb->mbxCommand) {
3270 /* Offline only */ 3350 /* Offline only */
3271 case MBX_INIT_LINK: 3351 case MBX_INIT_LINK:
3272 case MBX_DOWN_LINK: 3352 case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3283 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3363 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3284 printk(KERN_WARNING "mbox_read:Command 0x%x " 3364 printk(KERN_WARNING "mbox_read:Command 0x%x "
3285 "is illegal in on-line state\n", 3365 "is illegal in on-line state\n",
3286 phba->sysfs_mbox.mbox->mb.mbxCommand); 3366 pmb->mbxCommand);
3287 sysfs_mbox_idle(phba); 3367 sysfs_mbox_idle(phba);
3288 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3289 return -EPERM; 3369 return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3319 case MBX_CONFIG_PORT: 3399 case MBX_CONFIG_PORT:
3320 case MBX_RUN_BIU_DIAG: 3400 case MBX_RUN_BIU_DIAG:
3321 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 3401 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
3322 phba->sysfs_mbox.mbox->mb.mbxCommand); 3402 pmb->mbxCommand);
3323 sysfs_mbox_idle(phba); 3403 sysfs_mbox_idle(phba);
3324 spin_unlock_irq(&phba->hbalock); 3404 spin_unlock_irq(&phba->hbalock);
3325 return -EPERM; 3405 return -EPERM;
3326 default: 3406 default:
3327 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 3407 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
3328 phba->sysfs_mbox.mbox->mb.mbxCommand); 3408 pmb->mbxCommand);
3329 sysfs_mbox_idle(phba); 3409 sysfs_mbox_idle(phba);
3330 spin_unlock_irq(&phba->hbalock); 3410 spin_unlock_irq(&phba->hbalock);
3331 return -EPERM; 3411 return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3335 * or RESTART mailbox commands until the HBA is restarted. 3415 * or RESTART mailbox commands until the HBA is restarted.
3336 */ 3416 */
3337 if (phba->pport->stopped && 3417 if (phba->pport->stopped &&
3338 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && 3418 pmb->mbxCommand != MBX_DUMP_MEMORY &&
3339 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && 3419 pmb->mbxCommand != MBX_RESTART &&
3340 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && 3420 pmb->mbxCommand != MBX_WRITE_VPARMS &&
3341 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) 3421 pmb->mbxCommand != MBX_WRITE_WWN)
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 3422 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
3343 "1259 mbox: Issued mailbox cmd " 3423 "1259 mbox: Issued mailbox cmd "
3344 "0x%x while in stopped state.\n", 3424 "0x%x while in stopped state.\n",
3345 phba->sysfs_mbox.mbox->mb.mbxCommand); 3425 pmb->mbxCommand);
3346 3426
3347 phba->sysfs_mbox.mbox->vport = vport; 3427 phba->sysfs_mbox.mbox->vport = vport;
3348 3428
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3356 } 3436 }
3357 3437
3358 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3438 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3359 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 3439 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
3360 3440
3361 spin_unlock_irq(&phba->hbalock); 3441 spin_unlock_irq(&phba->hbalock);
3362 rc = lpfc_sli_issue_mbox (phba, 3442 rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3368 spin_unlock_irq(&phba->hbalock); 3448 spin_unlock_irq(&phba->hbalock);
3369 rc = lpfc_sli_issue_mbox_wait (phba, 3449 rc = lpfc_sli_issue_mbox_wait (phba,
3370 phba->sysfs_mbox.mbox, 3450 phba->sysfs_mbox.mbox,
3371 lpfc_mbox_tmo_val(phba, 3451 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
3372 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
3373 spin_lock_irq(&phba->hbalock); 3452 spin_lock_irq(&phba->hbalock);
3374 } 3453 }
3375 3454
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3391 return -EAGAIN; 3470 return -EAGAIN;
3392 } 3471 }
3393 3472
3394 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 3473 memcpy(buf, (uint8_t *) pmb + off, count);
3395 3474
3396 phba->sysfs_mbox.offset = off + count; 3475 phba->sysfs_mbox.offset = off + count;
3397 3476
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
3585 case LA_8GHZ_LINK: 3664 case LA_8GHZ_LINK:
3586 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 3665 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
3587 break; 3666 break;
3667 case LA_10GHZ_LINK:
3668 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
3669 break;
3588 default: 3670 default:
3589 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 3671 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
3590 break; 3672 break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3652 */ 3734 */
3653 if (phba->link_state < LPFC_LINK_DOWN || 3735 if (phba->link_state < LPFC_LINK_DOWN ||
3654 !phba->mbox_mem_pool || 3736 !phba->mbox_mem_pool ||
3655 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 3737 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
3656 return NULL; 3738 return NULL;
3657 3739
3658 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 3740 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
3663 return NULL; 3745 return NULL;
3664 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 3746 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
3665 3747
3666 pmb = &pmboxq->mb; 3748 pmb = &pmboxq->u.mb;
3667 pmb->mbxCommand = MBX_READ_STATUS; 3749 pmb->mbxCommand = MBX_READ_STATUS;
3668 pmb->mbxOwner = OWN_HOST; 3750 pmb->mbxOwner = OWN_HOST;
3669 pmboxq->context1 = NULL; 3751 pmboxq->context1 = NULL;
3670 pmboxq->vport = vport; 3752 pmboxq->vport = vport;
3671 3753
3672 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3754 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3673 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3755 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3674 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3756 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3675 else 3757 else
3676 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3758 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3695 pmboxq->vport = vport; 3777 pmboxq->vport = vport;
3696 3778
3697 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3779 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3698 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3780 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3699 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3700 else 3782 else
3701 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3769 return; 3851 return;
3770 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3852 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3771 3853
3772 pmb = &pmboxq->mb; 3854 pmb = &pmboxq->u.mb;
3773 pmb->mbxCommand = MBX_READ_STATUS; 3855 pmb->mbxCommand = MBX_READ_STATUS;
3774 pmb->mbxOwner = OWN_HOST; 3856 pmb->mbxOwner = OWN_HOST;
3775 pmb->un.varWords[0] = 0x1; /* reset request */ 3857 pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3777 pmboxq->vport = vport; 3859 pmboxq->vport = vport;
3778 3860
3779 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3861 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3780 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3862 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3863 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3782 else 3864 else
3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3865 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3795 pmboxq->vport = vport; 3877 pmboxq->vport = vport;
3796 3878
3797 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3879 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3798 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3880 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3799 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3881 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3800 else 3882 else
3801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3883 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3962 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4044 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3963} 4045}
3964 4046
4047/**
4048 * lpfc_hba_log_verbose_init - Set hba's log verbose level
4049 * @phba: Pointer to lpfc_hba struct.
4050 *
4051 * This function is called by the lpfc_get_cfgparam() routine to copy the
4052 * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
4053 * so that log messages honour the module's lpfc_log_verbose setting even
4054 * before any HBA port or vport has been created.
4055 **/
4056static void
4057lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
4058{
4059 phba->cfg_log_verbose = verbose;
4060}
4061

3965struct fc_function_template lpfc_transport_functions = { 4062struct fc_function_template lpfc_transport_functions = {
3966 /* fixed attributes the driver supports */ 4063 /* fixed attributes the driver supports */
3967 .show_host_node_name = 1, 4064 .show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4105 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4202 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4106 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4203 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4107 lpfc_use_msi_init(phba, lpfc_use_msi); 4204 lpfc_use_msi_init(phba, lpfc_use_msi);
4205 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4206 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4207 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4108 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4208 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4109 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4209 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4110 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4210 lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4113 phba->cfg_soft_wwpn = 0L; 4213 phba->cfg_soft_wwpn = 0L;
4114 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4214 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4115 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4215 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4116 /*
4117 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4118 * used to create the sg_dma_buf_pool must be dynamically calculated.
4119 * 2 segments are added since the IOCB needs a command and response bde.
4120 */
4121 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4122 sizeof(struct fcp_rsp) +
4123 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4124
4125 if (phba->cfg_enable_bg) {
4126 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4127 phba->cfg_sg_dma_buf_size +=
4128 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4129 }
4130
4131 /* Also reinitialize the host templates with new values. */
4132 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4133 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4134
4135 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4216 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4217 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4218 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4219
4136 return; 4220 return;
4137} 4221}
4138 4222
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f2619..d2a922997c0 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
26void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
27void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
28 30
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
35int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); 37int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
36void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 38void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
37void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 39void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
38int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, 40int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
39 LPFC_MBOXQ_t *, uint32_t); 41 LPFC_MBOXQ_t *, uint32_t);
40void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 42void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
41void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 43void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
42void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); 44void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
43void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); 45void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
44void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 46void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
47void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
45 48
46struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 49struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
47void lpfc_cleanup_rpis(struct lpfc_vport *, int); 50void lpfc_cleanup_rpis(struct lpfc_vport *, int);
48int lpfc_linkdown(struct lpfc_hba *); 51int lpfc_linkdown(struct lpfc_hba *);
52void lpfc_linkdown_port(struct lpfc_vport *);
49void lpfc_port_link_failure(struct lpfc_vport *); 53void lpfc_port_link_failure(struct lpfc_vport *);
50void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 54void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
51 55
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
54void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 58void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 59void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 60void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
61void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
57void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 62void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 63void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 64struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
105int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 110int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
106int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); 111int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
107int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); 112int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
113int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
108int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 114int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
109int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 115int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
110int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, 116int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
149void lpfc_unblock_mgmt_io(struct lpfc_hba *); 155void lpfc_unblock_mgmt_io(struct lpfc_hba *);
150void lpfc_offline_prep(struct lpfc_hba *); 156void lpfc_offline_prep(struct lpfc_hba *);
151void lpfc_offline(struct lpfc_hba *); 157void lpfc_offline(struct lpfc_hba *);
158void lpfc_reset_hba(struct lpfc_hba *);
152 159
153int lpfc_sli_setup(struct lpfc_hba *); 160int lpfc_sli_setup(struct lpfc_hba *);
154int lpfc_sli_queue_setup(struct lpfc_hba *); 161int lpfc_sli_queue_setup(struct lpfc_hba *);
155 162
156void lpfc_handle_eratt(struct lpfc_hba *); 163void lpfc_handle_eratt(struct lpfc_hba *);
157void lpfc_handle_latt(struct lpfc_hba *); 164void lpfc_handle_latt(struct lpfc_hba *);
158irqreturn_t lpfc_intr_handler(int, void *); 165irqreturn_t lpfc_sli_intr_handler(int, void *);
159irqreturn_t lpfc_sp_intr_handler(int, void *); 166irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
160irqreturn_t lpfc_fp_intr_handler(int, void *); 167irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
168irqreturn_t lpfc_sli4_intr_handler(int, void *);
169irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
170irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
161 171
162void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 172void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
163void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); 173void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
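The interrupt entry points are renamed with an lpfc_sli_ prefix and a parallel lpfc_sli4_* set is added for the new SLI-4 ports. A purely hypothetical sketch of how a setup path might choose between the two families by SLI revision; request_irq() and irq_handler_t are standard kernel interfaces, but this selection code is not part of the patch and the phba->pcidev wiring is assumed from the wider driver, not from this diff:

	irq_handler_t handler;

	/* pick the top-level handler family for this HBA generation */
	if (phba->sli_rev == LPFC_SLI_REV4)
		handler = lpfc_sli4_intr_handler;
	else
		handler = lpfc_sli_intr_handler;

	/* claim the (possibly shared) interrupt line for this adapter */
	if (request_irq(phba->pcidev->irq, handler, IRQF_SHARED, "lpfc", phba))
		return -ENODEV;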
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
165void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 175void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
166void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 176void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
167LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 177LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
178void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
168void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 179void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
180int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
181int lpfc_mbox_dev_check(struct lpfc_hba *);
169int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 182int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
170 190
171void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
172 uint32_t , LPFC_MBOXQ_t *); 192 uint32_t , LPFC_MBOXQ_t *);
173struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); 193struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
174void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); 194void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
195struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
196void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
197void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
198 uint16_t);
199void lpfc_unregister_unused_fcf(struct lpfc_hba *);
175 200
176int lpfc_mem_alloc(struct lpfc_hba *); 201int lpfc_mem_alloc(struct lpfc_hba *, int align);
177void lpfc_mem_free(struct lpfc_hba *); 202void lpfc_mem_free(struct lpfc_hba *);
203void lpfc_mem_free_all(struct lpfc_hba *);
178void lpfc_stop_vport_timers(struct lpfc_vport *); 204void lpfc_stop_vport_timers(struct lpfc_vport *);
179 205
180void lpfc_poll_timeout(unsigned long ptr); 206void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 212uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
187void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, 213void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
188 uint32_t); 214 uint32_t);
215void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
189 216
190void lpfc_reset_barrier(struct lpfc_hba * phba); 217void lpfc_reset_barrier(struct lpfc_hba * phba);
191int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 218int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
198int lpfc_sli_hba_down(struct lpfc_hba *); 225int lpfc_sli_hba_down(struct lpfc_hba *);
199int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 226int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
200int lpfc_sli_handle_mb_event(struct lpfc_hba *); 227int lpfc_sli_handle_mb_event(struct lpfc_hba *);
201int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 228void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
202int lpfc_sli_check_eratt(struct lpfc_hba *); 229int lpfc_sli_check_eratt(struct lpfc_hba *);
203int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 230void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
204 struct lpfc_sli_ring *, uint32_t); 231 struct lpfc_sli_ring *, uint32_t);
232int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
205void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 233void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
206int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
207 struct lpfc_iocbq *, uint32_t); 235 struct lpfc_iocbq *, uint32_t);
208void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
209void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
237 265
238int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 266int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
239 267
240int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, 268int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, struct lpfc_iocbq *, 269 struct lpfc_iocbq *, struct lpfc_iocbq *,
242 uint32_t); 270 uint32_t);
243void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, 271void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
254const char* lpfc_info(struct Scsi_Host *); 282const char* lpfc_info(struct Scsi_Host *);
255int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 283int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
256 284
285int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
286int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
287int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
288int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
289int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
290
257void lpfc_get_cfgparam(struct lpfc_hba *); 291void lpfc_get_cfgparam(struct lpfc_hba *);
258void lpfc_get_vport_cfgparam(struct lpfc_vport *); 292void lpfc_get_vport_cfgparam(struct lpfc_vport *);
259int lpfc_alloc_sysfs_attr(struct lpfc_vport *); 293int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
314 struct lpfc_iocbq *); 348 struct lpfc_iocbq *);
315struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); 349struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
316void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); 350void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
351void lpfc_create_static_vport(struct lpfc_hba *);
352void lpfc_stop_hba_timers(struct lpfc_hba *);
353void lpfc_stop_port(struct lpfc_hba *);
354void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
355int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
356void lpfc_start_fdiscs(struct lpfc_hba *phba);
317 357
318#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 358#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
319#define HBA_EVENT_RSCN 5 359#define HBA_EVENT_RSCN 5
320#define HBA_EVENT_LINK_UP 2 360#define HBA_EVENT_LINK_UP 2
321#define HBA_EVENT_LINK_DOWN 3 361#define HBA_EVENT_LINK_DOWN 3
362
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e..1dbccfd3d02 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -32,8 +32,10 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34 34
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
267 uint32_t tmo, uint8_t retry) 269 uint32_t tmo, uint8_t retry)
268{ 270{
269 struct lpfc_hba *phba = vport->phba; 271 struct lpfc_hba *phba = vport->phba;
270 struct lpfc_sli *psli = &phba->sli;
271 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
272 IOCB_t *icmd; 272 IOCB_t *icmd;
273 struct lpfc_iocbq *geniocb; 273 struct lpfc_iocbq *geniocb;
274 int rc; 274 int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 331 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
332 geniocb->vport = vport; 332 geniocb->vport = vport;
333 geniocb->retry = retry; 333 geniocb->retry = retry;
334 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); 334 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
335 335
336 if (rc == IOCB_ERROR) { 336 if (rc == IOCB_ERROR) {
337 lpfc_sli_release_iocbq(phba, geniocb); 337 lpfc_sli_release_iocbq(phba, geniocb);
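This hunk shows the conversion repeated throughout the patch: lpfc_sli_issue_iocb() now takes a ring number (uint32_t) rather than a struct lpfc_sli_ring pointer, so callers drop their local psli/pring lookups. A minimal before/after fragment, built only from lines that appear in these hunks:

	/* old: each caller resolved the ring pointer itself */
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);

	/* new: pass the ring number and let the SLI layer look it up */
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);

The same substitution is applied to lpfc_sli_issue_iocb_wait() and to every ELS and CT call site below.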
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1578 case LA_8GHZ_LINK: 1578 case LA_8GHZ_LINK:
1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; 1579 ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
1580 break; 1580 break;
1581 case LA_10GHZ_LINK:
1582 ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
1583 break;
1581 default: 1584 default:
1582 ae->un.PortSpeed = 1585 ae->un.PortSpeed =
1583 HBA_PORTSPEED_UNKNOWN; 1586 HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1730 uint8_t *fwname; 1733 uint8_t *fwname;
1731 1734
1732 if (vp->rev.rBit) { 1735 if (vp->rev.rBit) {
1733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1736 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1734 rev = vp->rev.sli2FwRev; 1737 rev = vp->rev.sli2FwRev;
1735 else 1738 else
1736 rev = vp->rev.sli1FwRev; 1739 rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1756 } 1759 }
1757 b4 = (rev & 0x0000000f); 1760 b4 = (rev & 0x0000000f);
1758 1761
1759 if (psli->sli_flag & LPFC_SLI2_ACTIVE) 1762 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1760 fwname = vp->rev.sli2FwName; 1763 fwname = vp->rev.sli2FwName;
1761 else 1764 else
1762 fwname = vp->rev.sli1FwName; 1765 fwname = vp->rev.sli1FwName;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07..2b02b1fb39a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -33,8 +33,10 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35 35
36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
37#include "lpfc_sli.h" 38#include "lpfc_sli.h"
39#include "lpfc_sli4.h"
38#include "lpfc_nl.h" 40#include "lpfc_nl.h"
39#include "lpfc_disc.h" 41#include "lpfc_disc.h"
40#include "lpfc_scsi.h" 42#include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
280 struct lpfc_dmabuf *d_buf; 282 struct lpfc_dmabuf *d_buf;
281 struct hbq_dmabuf *hbq_buf; 283 struct hbq_dmabuf *hbq_buf;
282 284
285 if (phba->sli_rev != 3)
286 return 0;
283 cnt = LPFC_HBQINFO_SIZE; 287 cnt = LPFC_HBQINFO_SIZE;
284 spin_lock_irq(&phba->hbalock); 288 spin_lock_irq(&phba->hbalock);
285 289
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
489 pring->next_cmdidx, pring->local_getidx, 493 pring->next_cmdidx, pring->local_getidx,
490 pring->flag, pgpp->rspPutInx, pring->numRiocb); 494 pring->flag, pgpp->rspPutInx, pring->numRiocb);
491 } 495 }
492 word0 = readl(phba->HAregaddr); 496
493 word1 = readl(phba->CAregaddr); 497 if (phba->sli_rev <= LPFC_SLI_REV3) {
494 word2 = readl(phba->HSregaddr); 498 word0 = readl(phba->HAregaddr);
495 word3 = readl(phba->HCregaddr); 499 word1 = readl(phba->CAregaddr);
496 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", 500 word2 = readl(phba->HSregaddr);
497 word0, word1, word2, word3); 501 word3 = readl(phba->HCregaddr);
502 len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
503 "HC:%08x\n", word0, word1, word2, word3);
504 }
498 spin_unlock_irq(&phba->hbalock); 505 spin_unlock_irq(&phba->hbalock);
499 return len; 506 return len;
500} 507}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd10897207..1142070e948 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 135#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 136#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 137#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
138#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */
138 139
139/* ndlp usage management macros */ 140/* ndlp usage management macros */
140#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 141#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d..6bdeb14878a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
84 uint32_t ha_copy; 86 uint32_t ha_copy;
85 87
86 if (vport->port_state >= LPFC_VPORT_READY || 88 if (vport->port_state >= LPFC_VPORT_READY ||
87 phba->link_state == LPFC_LINK_DOWN) 89 phba->link_state == LPFC_LINK_DOWN ||
90 phba->sli_rev > LPFC_SLI_REV3)
88 return 0; 91 return 0;
89 92
90 /* Read the HBA Host Attention Register */ 93 /* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
219 icmd->un.elsreq64.myID = vport->fc_myDID; 222 icmd->un.elsreq64.myID = vport->fc_myDID;
220 223
221 /* For ELS_REQUEST64_CR, use the VPI by default */ 224 /* For ELS_REQUEST64_CR, use the VPI by default */
222 icmd->ulpContext = vport->vpi; 225 icmd->ulpContext = vport->vpi + phba->vpi_base;
223 icmd->ulpCt_h = 0; 226 icmd->ulpCt_h = 0;
224 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 227 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
225 if (elscmd == ELS_CMD_ECHO) 228 if (elscmd == ELS_CMD_ECHO)
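The VPI placed in an outgoing ELS IOCB is now offset by phba->vpi_base, and the same base is subtracted when an unsolicited ELS is mapped back to a vport (see the lpfc_els_unsol_event hunk near the end of this file). A short fragment pairing the two directions, using only fields shown in this diff:

	/* transmit side: convert the per-vport VPI to the adapter-wide value */
	icmd->ulpContext = vport->vpi + phba->vpi_base;

	/* receive side: convert back before looking up the vport */
	vport = lpfc_find_vport_by_vpid(phba,
			icmd->unsli3.rcvsli3.vpi - phba->vpi_base);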
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
305 * 0 - successfully issued fabric registration login for @vport 308 * 0 - successfully issued fabric registration login for @vport
306 * -ENXIO -- failed to issue fabric registration login for @vport 309 * -ENXIO -- failed to issue fabric registration login for @vport
307 **/ 310 **/
308static int 311int
309lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 312lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
310{ 313{
311 struct lpfc_hba *phba = vport->phba; 314 struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 err = 4; 348 err = 4;
346 goto fail; 349 goto fail;
347 } 350 }
348 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 351 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
349 0);
350 if (rc) { 352 if (rc) {
351 err = 5; 353 err = 5;
352 goto fail_free_mbox; 354 goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
386} 388}
387 389
388/** 390/**
391 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
392 * @vport: pointer to a host virtual N_Port data structure.
393 *
394 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
395 * the @vport. This mailbox command is necessary for FCoE only.
396 *
397 * Return code
398 * 0 - successfully issued REG_VFI for @vport
399 * A failure code otherwise.
400 **/
401static int
402lpfc_issue_reg_vfi(struct lpfc_vport *vport)
403{
404 struct lpfc_hba *phba = vport->phba;
405 LPFC_MBOXQ_t *mboxq;
406 struct lpfc_nodelist *ndlp;
407 struct serv_parm *sp;
408 struct lpfc_dmabuf *dmabuf;
409 int rc = 0;
410
411 sp = &phba->fc_fabparam;
412 ndlp = lpfc_findnode_did(vport, Fabric_DID);
413 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
414 rc = -ENODEV;
415 goto fail;
416 }
417
418 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
419 if (!dmabuf) {
420 rc = -ENOMEM;
421 goto fail;
422 }
423 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
424 if (!dmabuf->virt) {
425 rc = -ENOMEM;
426 goto fail_free_dmabuf;
427 }
428 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
429 if (!mboxq) {
430 rc = -ENOMEM;
431 goto fail_free_coherent;
432 }
433 vport->port_state = LPFC_FABRIC_CFG_LINK;
434 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
435 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
436 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
437 mboxq->vport = vport;
438 mboxq->context1 = dmabuf;
439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
440 if (rc == MBX_NOT_FINISHED) {
441 rc = -ENXIO;
442 goto fail_free_mbox;
443 }
444 return 0;
445
446fail_free_mbox:
447 mempool_free(mboxq, phba->mbox_mem_pool);
448fail_free_coherent:
449 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
450fail_free_dmabuf:
451 kfree(dmabuf);
452fail:
453 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
454 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
455 "0289 Issue Register VFI failed: Err %d\n", rc);
456 return rc;
457}
458
459/**
389 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port 460 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
390 * @vport: pointer to a host virtual N_Port data structure. 461 * @vport: pointer to a host virtual N_Port data structure.
391 * @ndlp: pointer to a node-list data structure. 462 * @ndlp: pointer to a node-list data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 } 568 }
498 } 569 }
499 570
500 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 571 if (phba->sli_rev < LPFC_SLI_REV4) {
501 572 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
502 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 573 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
503 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { 574 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
504 lpfc_register_new_vport(phba, vport, ndlp); 575 lpfc_register_new_vport(phba, vport, ndlp);
505 return 0; 576 else
577 lpfc_issue_fabric_reglogin(vport);
578 } else {
579 ndlp->nlp_type |= NLP_FABRIC;
580 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
581 if (vport->vfi_state & LPFC_VFI_REGISTERED) {
582 lpfc_start_fdiscs(phba);
583 lpfc_do_scr_ns_plogi(phba, vport);
584 } else
585 lpfc_issue_reg_vfi(vport);
506 } 586 }
507 lpfc_issue_fabric_reglogin(vport);
508 return 0; 587 return 0;
509} 588}
510
511/** 589/**
512 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 590 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
513 * @vport: pointer to a host virtual N_Port data structure. 591 * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
815 if (sp->cmn.fcphHigh < FC_PH3) 893 if (sp->cmn.fcphHigh < FC_PH3)
816 sp->cmn.fcphHigh = FC_PH3; 894 sp->cmn.fcphHigh = FC_PH3;
817 895
818 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 896 if (phba->sli_rev == LPFC_SLI_REV4) {
897 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
898 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
899 /* FLOGI needs to be 3 for WQE FCFI */
900 /* Set the fcfi to the fcfi we registered with */
901 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
902 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
819 sp->cmn.request_multiple_Nport = 1; 903 sp->cmn.request_multiple_Nport = 1;
820
821 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 904 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
822 icmd->ulpCt_h = 1; 905 icmd->ulpCt_h = 1;
823 icmd->ulpCt_l = 0; 906 icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
930 if (!ndlp) 1013 if (!ndlp)
931 return 0; 1014 return 0;
932 lpfc_nlp_init(vport, ndlp, Fabric_DID); 1015 lpfc_nlp_init(vport, ndlp, Fabric_DID);
1016 /* Set the node type */
1017 ndlp->nlp_type |= NLP_FABRIC;
933 /* Put ndlp onto node list */ 1018 /* Put ndlp onto node list */
934 lpfc_enqueue_node(vport, ndlp); 1019 lpfc_enqueue_node(vport, ndlp);
935 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 1020 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1350 IOCB_t *icmd; 1435 IOCB_t *icmd;
1351 struct lpfc_nodelist *ndlp; 1436 struct lpfc_nodelist *ndlp;
1352 struct lpfc_iocbq *elsiocb; 1437 struct lpfc_iocbq *elsiocb;
1353 struct lpfc_sli_ring *pring;
1354 struct lpfc_sli *psli; 1438 struct lpfc_sli *psli;
1355 uint8_t *pcmd; 1439 uint8_t *pcmd;
1356 uint16_t cmdsize; 1440 uint16_t cmdsize;
1357 int ret; 1441 int ret;
1358 1442
1359 psli = &phba->sli; 1443 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 1444
1362 ndlp = lpfc_findnode_did(vport, did); 1445 ndlp = lpfc_findnode_did(vport, did);
1363 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 1446 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1391 1474
1392 phba->fc_stat.elsXmitPLOGI++; 1475 phba->fc_stat.elsXmitPLOGI++;
1393 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 1476 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1394 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 1477 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1395 1478
1396 if (ret == IOCB_ERROR) { 1479 if (ret == IOCB_ERROR) {
1397 lpfc_els_free_iocb(phba, elsiocb); 1480 lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1501 PRLI *npr; 1584 PRLI *npr;
1502 IOCB_t *icmd; 1585 IOCB_t *icmd;
1503 struct lpfc_iocbq *elsiocb; 1586 struct lpfc_iocbq *elsiocb;
1504 struct lpfc_sli_ring *pring;
1505 struct lpfc_sli *psli;
1506 uint8_t *pcmd; 1587 uint8_t *pcmd;
1507 uint16_t cmdsize; 1588 uint16_t cmdsize;
1508 1589
1509 psli = &phba->sli;
1510 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1511
1512 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 1590 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1513 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1591 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1514 ndlp->nlp_DID, ELS_CMD_PRLI); 1592 ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1550 spin_lock_irq(shost->host_lock); 1628 spin_lock_irq(shost->host_lock);
1551 ndlp->nlp_flag |= NLP_PRLI_SND; 1629 ndlp->nlp_flag |= NLP_PRLI_SND;
1552 spin_unlock_irq(shost->host_lock); 1630 spin_unlock_irq(shost->host_lock);
1553 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1631 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1632 IOCB_ERROR) {
1554 spin_lock_irq(shost->host_lock); 1633 spin_lock_irq(shost->host_lock);
1555 ndlp->nlp_flag &= ~NLP_PRLI_SND; 1634 ndlp->nlp_flag &= ~NLP_PRLI_SND;
1556 spin_unlock_irq(shost->host_lock); 1635 spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
1608 * and continue discovery. 1687 * and continue discovery.
1609 */ 1688 */
1610 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 1689 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1611 !(vport->fc_flag & FC_RSCN_MODE)) { 1690 !(vport->fc_flag & FC_RSCN_MODE) &&
1691 (phba->sli_rev < LPFC_SLI_REV4)) {
1612 lpfc_issue_reg_vpi(phba, vport); 1692 lpfc_issue_reg_vpi(phba, vport);
1613 return; 1693 return;
1614 } 1694 }
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1788 ADISC *ap; 1868 ADISC *ap;
1789 IOCB_t *icmd; 1869 IOCB_t *icmd;
1790 struct lpfc_iocbq *elsiocb; 1870 struct lpfc_iocbq *elsiocb;
1791 struct lpfc_sli *psli = &phba->sli;
1792 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
1793 uint8_t *pcmd; 1871 uint8_t *pcmd;
1794 uint16_t cmdsize; 1872 uint16_t cmdsize;
1795 1873
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1822 spin_lock_irq(shost->host_lock); 1900 spin_lock_irq(shost->host_lock);
1823 ndlp->nlp_flag |= NLP_ADISC_SND; 1901 ndlp->nlp_flag |= NLP_ADISC_SND;
1824 spin_unlock_irq(shost->host_lock); 1902 spin_unlock_irq(shost->host_lock);
1825 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1903 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1904 IOCB_ERROR) {
1826 spin_lock_irq(shost->host_lock); 1905 spin_lock_irq(shost->host_lock);
1827 ndlp->nlp_flag &= ~NLP_ADISC_SND; 1906 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1828 spin_unlock_irq(shost->host_lock); 1907 spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1937 struct lpfc_hba *phba = vport->phba; 2016 struct lpfc_hba *phba = vport->phba;
1938 IOCB_t *icmd; 2017 IOCB_t *icmd;
1939 struct lpfc_iocbq *elsiocb; 2018 struct lpfc_iocbq *elsiocb;
1940 struct lpfc_sli_ring *pring;
1941 struct lpfc_sli *psli;
1942 uint8_t *pcmd; 2019 uint8_t *pcmd;
1943 uint16_t cmdsize; 2020 uint16_t cmdsize;
1944 int rc; 2021 int rc;
1945 2022
1946 psli = &phba->sli;
1947 pring = &psli->ring[LPFC_ELS_RING];
1948
1949 spin_lock_irq(shost->host_lock); 2023 spin_lock_irq(shost->host_lock);
1950 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2024 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1951 spin_unlock_irq(shost->host_lock); 2025 spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1978 spin_lock_irq(shost->host_lock); 2052 spin_lock_irq(shost->host_lock);
1979 ndlp->nlp_flag |= NLP_LOGO_SND; 2053 ndlp->nlp_flag |= NLP_LOGO_SND;
1980 spin_unlock_irq(shost->host_lock); 2054 spin_unlock_irq(shost->host_lock);
1981 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2055 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1982 2056
1983 if (rc == IOCB_ERROR) { 2057 if (rc == IOCB_ERROR) {
1984 spin_lock_irq(shost->host_lock); 2058 spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2058 struct lpfc_hba *phba = vport->phba; 2132 struct lpfc_hba *phba = vport->phba;
2059 IOCB_t *icmd; 2133 IOCB_t *icmd;
2060 struct lpfc_iocbq *elsiocb; 2134 struct lpfc_iocbq *elsiocb;
2061 struct lpfc_sli_ring *pring;
2062 struct lpfc_sli *psli; 2135 struct lpfc_sli *psli;
2063 uint8_t *pcmd; 2136 uint8_t *pcmd;
2064 uint16_t cmdsize; 2137 uint16_t cmdsize;
2065 struct lpfc_nodelist *ndlp; 2138 struct lpfc_nodelist *ndlp;
2066 2139
2067 psli = &phba->sli; 2140 psli = &phba->sli;
2068 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2069 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 2141 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2070 2142
2071 ndlp = lpfc_findnode_did(vport, nportid); 2143 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2108 2180
2109 phba->fc_stat.elsXmitSCR++; 2181 phba->fc_stat.elsXmitSCR++;
2110 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2182 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2111 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2183 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2184 IOCB_ERROR) {
2112 /* The additional lpfc_nlp_put will cause the following 2185 /* The additional lpfc_nlp_put will cause the following
2113          * lpfc_els_free_iocb routine to trigger the release of 2186          * lpfc_els_free_iocb routine to trigger the release of
2114 * the node. 2187 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2152 struct lpfc_hba *phba = vport->phba; 2225 struct lpfc_hba *phba = vport->phba;
2153 IOCB_t *icmd; 2226 IOCB_t *icmd;
2154 struct lpfc_iocbq *elsiocb; 2227 struct lpfc_iocbq *elsiocb;
2155 struct lpfc_sli_ring *pring;
2156 struct lpfc_sli *psli; 2228 struct lpfc_sli *psli;
2157 FARP *fp; 2229 FARP *fp;
2158 uint8_t *pcmd; 2230 uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2162 struct lpfc_nodelist *ndlp; 2234 struct lpfc_nodelist *ndlp;
2163 2235
2164 psli = &phba->sli; 2236 psli = &phba->sli;
2165 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2166 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 2237 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2167 2238
2168 ndlp = lpfc_findnode_did(vport, nportid); 2239 ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2219 2290
2220 phba->fc_stat.elsXmitFARPR++; 2291 phba->fc_stat.elsXmitFARPR++;
2221 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 2292 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2222 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 2293 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2294 IOCB_ERROR) {
2223 /* The additional lpfc_nlp_put will cause the following 2295 /* The additional lpfc_nlp_put will cause the following
2224 * lpfc_els_free_iocb routine to trigger the release of 2296 * lpfc_els_free_iocb routine to trigger the release of
2225 * the node. 2297 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2949 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3021 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2950 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3022 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2951 3023
3024 /*
3025 * This routine is used to register and unregister in previous SLI
3026 * modes.
3027 */
3028 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3029 (phba->sli_rev == LPFC_SLI_REV4))
3030 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3031
2952 pmb->context1 = NULL; 3032 pmb->context1 = NULL;
2953 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3033 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2954 kfree(mp); 3034 kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2961 */ 3041 */
2962 lpfc_nlp_not_used(ndlp); 3042 lpfc_nlp_not_used(ndlp);
2963 } 3043 }
3044
2964 return; 3045 return;
2965} 3046}
2966 3047
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3170 IOCB_t *icmd; 3251 IOCB_t *icmd;
3171 IOCB_t *oldcmd; 3252 IOCB_t *oldcmd;
3172 struct lpfc_iocbq *elsiocb; 3253 struct lpfc_iocbq *elsiocb;
3173 struct lpfc_sli_ring *pring;
3174 struct lpfc_sli *psli; 3254 struct lpfc_sli *psli;
3175 uint8_t *pcmd; 3255 uint8_t *pcmd;
3176 uint16_t cmdsize; 3256 uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3178 ELS_PKT *els_pkt_ptr; 3258 ELS_PKT *els_pkt_ptr;
3179 3259
3180 psli = &phba->sli; 3260 psli = &phba->sli;
3181 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3182 oldcmd = &oldiocb->iocb; 3261 oldcmd = &oldiocb->iocb;
3183 3262
3184 switch (flag) { 3263 switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3266 } 3345 }
3267 3346
3268 phba->fc_stat.elsXmitACC++; 3347 phba->fc_stat.elsXmitACC++;
3269 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3348 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3270 if (rc == IOCB_ERROR) { 3349 if (rc == IOCB_ERROR) {
3271 lpfc_els_free_iocb(phba, elsiocb); 3350 lpfc_els_free_iocb(phba, elsiocb);
3272 return 1; 3351 return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3305 IOCB_t *icmd; 3384 IOCB_t *icmd;
3306 IOCB_t *oldcmd; 3385 IOCB_t *oldcmd;
3307 struct lpfc_iocbq *elsiocb; 3386 struct lpfc_iocbq *elsiocb;
3308 struct lpfc_sli_ring *pring;
3309 struct lpfc_sli *psli; 3387 struct lpfc_sli *psli;
3310 uint8_t *pcmd; 3388 uint8_t *pcmd;
3311 uint16_t cmdsize; 3389 uint16_t cmdsize;
3312 int rc; 3390 int rc;
3313 3391
3314 psli = &phba->sli; 3392 psli = &phba->sli;
3315 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3316
3317 cmdsize = 2 * sizeof(uint32_t); 3393 cmdsize = 2 * sizeof(uint32_t);
3318 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3394 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3319 ndlp->nlp_DID, ELS_CMD_LS_RJT); 3395 ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3346 3422
3347 phba->fc_stat.elsXmitLSRJT++; 3423 phba->fc_stat.elsXmitLSRJT++;
3348 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3349 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3425 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3350 3426
3351 if (rc == IOCB_ERROR) { 3427 if (rc == IOCB_ERROR) {
3352 lpfc_els_free_iocb(phba, elsiocb); 3428 lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3379 struct lpfc_nodelist *ndlp) 3455 struct lpfc_nodelist *ndlp)
3380{ 3456{
3381 struct lpfc_hba *phba = vport->phba; 3457 struct lpfc_hba *phba = vport->phba;
3382 struct lpfc_sli *psli = &phba->sli;
3383 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
3384 ADISC *ap; 3458 ADISC *ap;
3385 IOCB_t *icmd, *oldcmd; 3459 IOCB_t *icmd, *oldcmd;
3386 struct lpfc_iocbq *elsiocb; 3460 struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3422 3496
3423 phba->fc_stat.elsXmitACC++; 3497 phba->fc_stat.elsXmitACC++;
3424 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3498 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3425 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3499 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3426 if (rc == IOCB_ERROR) { 3500 if (rc == IOCB_ERROR) {
3427 lpfc_els_free_iocb(phba, elsiocb); 3501 lpfc_els_free_iocb(phba, elsiocb);
3428 return 1; 3502 return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3459 IOCB_t *icmd; 3533 IOCB_t *icmd;
3460 IOCB_t *oldcmd; 3534 IOCB_t *oldcmd;
3461 struct lpfc_iocbq *elsiocb; 3535 struct lpfc_iocbq *elsiocb;
3462 struct lpfc_sli_ring *pring;
3463 struct lpfc_sli *psli; 3536 struct lpfc_sli *psli;
3464 uint8_t *pcmd; 3537 uint8_t *pcmd;
3465 uint16_t cmdsize; 3538 uint16_t cmdsize;
3466 int rc; 3539 int rc;
3467 3540
3468 psli = &phba->sli; 3541 psli = &phba->sli;
3469 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
3470 3542
3471 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 3543 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3472 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 3544 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3520 phba->fc_stat.elsXmitACC++; 3592 phba->fc_stat.elsXmitACC++;
3521 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 3593 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3522 3594
3523 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3595 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3524 if (rc == IOCB_ERROR) { 3596 if (rc == IOCB_ERROR) {
3525 lpfc_els_free_iocb(phba, elsiocb); 3597 lpfc_els_free_iocb(phba, elsiocb);
3526 return 1; 3598 return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3562 RNID *rn; 3634 RNID *rn;
3563 IOCB_t *icmd, *oldcmd; 3635 IOCB_t *icmd, *oldcmd;
3564 struct lpfc_iocbq *elsiocb; 3636 struct lpfc_iocbq *elsiocb;
3565 struct lpfc_sli_ring *pring;
3566 struct lpfc_sli *psli; 3637 struct lpfc_sli *psli;
3567 uint8_t *pcmd; 3638 uint8_t *pcmd;
3568 uint16_t cmdsize; 3639 uint16_t cmdsize;
3569 int rc; 3640 int rc;
3570 3641
3571 psli = &phba->sli; 3642 psli = &phba->sli;
3572 pring = &psli->ring[LPFC_ELS_RING];
3573
3574 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 3643 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3575 + (2 * sizeof(struct lpfc_name)); 3644 + (2 * sizeof(struct lpfc_name));
3576 if (format) 3645 if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3626 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 3695 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
3627 * it could be freed */ 3696 * it could be freed */
3628 3697
3629 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 3698 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3630 if (rc == IOCB_ERROR) { 3699 if (rc == IOCB_ERROR) {
3631 lpfc_els_free_iocb(phba, elsiocb); 3700 lpfc_els_free_iocb(phba, elsiocb);
3632 return 1; 3701 return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
3839 payload_len -= sizeof(uint32_t); 3908 payload_len -= sizeof(uint32_t);
3840 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 3909 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
3841 case RSCN_ADDRESS_FORMAT_PORT: 3910 case RSCN_ADDRESS_FORMAT_PORT:
3842 if (ns_did.un.word == rscn_did.un.word) 3911 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
3912 && (ns_did.un.b.area == rscn_did.un.b.area)
3913 && (ns_did.un.b.id == rscn_did.un.b.id))
3843 goto return_did_out; 3914 goto return_did_out;
3844 break; 3915 break;
3845 case RSCN_ADDRESS_FORMAT_AREA: 3916 case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4300 lpfc_init_link(phba, mbox, 4371 lpfc_init_link(phba, mbox,
4301 phba->cfg_topology, 4372 phba->cfg_topology,
4302 phba->cfg_link_speed); 4373 phba->cfg_link_speed);
4303 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 4374 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4304 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4375 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4305 mbox->vport = vport; 4376 mbox->vport = vport;
4306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 4377 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4440static void 4511static void
4441lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4512lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4442{ 4513{
4443 struct lpfc_sli *psli = &phba->sli;
4444 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4445 MAILBOX_t *mb; 4514 MAILBOX_t *mb;
4446 IOCB_t *icmd; 4515 IOCB_t *icmd;
4447 RPS_RSP *rps_rsp; 4516 RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4451 uint16_t xri, status; 4520 uint16_t xri, status;
4452 uint32_t cmdsize; 4521 uint32_t cmdsize;
4453 4522
4454 mb = &pmb->mb; 4523 mb = &pmb->u.mb;
4455 4524
4456 ndlp = (struct lpfc_nodelist *) pmb->context2; 4525 ndlp = (struct lpfc_nodelist *) pmb->context2;
4457 xri = (uint16_t) ((unsigned long)(pmb->context1)); 4526 xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4507 ndlp->nlp_rpi); 4576 ndlp->nlp_rpi);
4508 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4577 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4509 phba->fc_stat.elsXmitACC++; 4578 phba->fc_stat.elsXmitACC++;
4510 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) 4579 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4511 lpfc_els_free_iocb(phba, elsiocb); 4580 lpfc_els_free_iocb(phba, elsiocb);
4512 return; 4581 return;
4513} 4582}
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4616 IOCB_t *icmd, *oldcmd; 4685 IOCB_t *icmd, *oldcmd;
4617 RPL_RSP rpl_rsp; 4686 RPL_RSP rpl_rsp;
4618 struct lpfc_iocbq *elsiocb; 4687 struct lpfc_iocbq *elsiocb;
4619 struct lpfc_sli *psli = &phba->sli;
4620 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
4621 uint8_t *pcmd; 4688 uint8_t *pcmd;
4622 4689
4623 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4690 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4654 ndlp->nlp_rpi); 4721 ndlp->nlp_rpi);
4655 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4722 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4656 phba->fc_stat.elsXmitACC++; 4723 phba->fc_stat.elsXmitACC++;
4657 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 4724 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4725 IOCB_ERROR) {
4658 lpfc_els_free_iocb(phba, elsiocb); 4726 lpfc_els_free_iocb(phba, elsiocb);
4659 return 1; 4727 return 1;
4660 } 4728 }
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4883 } else { 4951 } else {
4884 /* FAN verified - skip FLOGI */ 4952 /* FAN verified - skip FLOGI */
4885 vport->fc_myDID = vport->fc_prevDID; 4953 vport->fc_myDID = vport->fc_prevDID;
4886 lpfc_issue_fabric_reglogin(vport); 4954 if (phba->sli_rev < LPFC_SLI_REV4)
4955 lpfc_issue_fabric_reglogin(vport);
4956 else
4957 lpfc_issue_reg_vfi(vport);
4887 } 4958 }
4888 } 4959 }
4889 return 0; 4960 return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5566 5637
5567dropit: 5638dropit:
5568 if (vport && !(vport->load_flag & FC_UNLOADING)) 5639 if (vport && !(vport->load_flag & FC_UNLOADING))
5569 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5640 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5570 "(%d):0111 Dropping received ELS cmd " 5641 "0111 Dropping received ELS cmd "
5571 "Data: x%x x%x x%x\n", 5642 "Data: x%x x%x x%x\n",
5572 vport->vpi, icmd->ulpStatus, 5643 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5573 icmd->un.ulpWord[4], icmd->ulpTimeout);
5574 phba->fc_stat.elsRcvDrop++; 5644 phba->fc_stat.elsRcvDrop++;
5575} 5645}
5576 5646
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5646 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 5716 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5647 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 5717 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5648 vport = phba->pport; 5718 vport = phba->pport;
5649 else { 5719 else
5650 uint16_t vpi = icmd->unsli3.rcvsli3.vpi; 5720 vport = lpfc_find_vport_by_vpid(phba,
5651 vport = lpfc_find_vport_by_vpid(phba, vpi); 5721 icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5652 }
5653 } 5722 }
5654 /* If there are no BDEs associated 5723 /* If there are no BDEs associated
5655 * with this IOCB, there is nothing to do. 5724 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5781 struct lpfc_vport *vport = pmb->vport; 5850 struct lpfc_vport *vport = pmb->vport;
5782 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5852 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5784 MAILBOX_t *mb = &pmb->mb; 5853 MAILBOX_t *mb = &pmb->u.mb;
5785 5854
5786 spin_lock_irq(shost->host_lock); 5855 spin_lock_irq(shost->host_lock);
5787 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 5856 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5818 5887
5819 } else { 5888 } else {
5820 if (vport == phba->pport) 5889 if (vport == phba->pport)
5821 lpfc_issue_fabric_reglogin(vport); 5890 if (phba->sli_rev < LPFC_SLI_REV4)
5891 lpfc_issue_fabric_reglogin(vport);
5892 else
5893 lpfc_issue_reg_vfi(vport);
5822 else 5894 else
5823 lpfc_do_scr_ns_plogi(phba, vport); 5895 lpfc_do_scr_ns_plogi(phba, vport);
5824 } 5896 }
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
5850 5922
5851 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5852 if (mbox) { 5924 if (mbox) {
5853 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); 5925 lpfc_reg_vpi(vport, mbox);
5854 mbox->vport = vport; 5926 mbox->vport = vport;
5855 mbox->context2 = lpfc_nlp_get(ndlp); 5927 mbox->context2 = lpfc_nlp_get(ndlp);
5856 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 5928 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6139{ 6211{
6140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6141 struct lpfc_hba *phba = vport->phba; 6213 struct lpfc_hba *phba = vport->phba;
6142 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6143 IOCB_t *icmd; 6214 IOCB_t *icmd;
6144 struct lpfc_iocbq *elsiocb; 6215 struct lpfc_iocbq *elsiocb;
6145 uint8_t *pcmd; 6216 uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6169 spin_lock_irq(shost->host_lock); 6240 spin_lock_irq(shost->host_lock);
6170 ndlp->nlp_flag |= NLP_LOGO_SND; 6241 ndlp->nlp_flag |= NLP_LOGO_SND;
6171 spin_unlock_irq(shost->host_lock); 6242 spin_unlock_irq(shost->host_lock);
6172 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 6243 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6244 IOCB_ERROR) {
6173 spin_lock_irq(shost->host_lock); 6245 spin_lock_irq(shost->host_lock);
6174 ndlp->nlp_flag &= ~NLP_LOGO_SND; 6246 ndlp->nlp_flag &= ~NLP_LOGO_SND;
6175 spin_unlock_irq(shost->host_lock); 6247 spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6224 struct lpfc_iocbq *iocb; 6296 struct lpfc_iocbq *iocb;
6225 unsigned long iflags; 6297 unsigned long iflags;
6226 int ret; 6298 int ret;
6227 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6228 IOCB_t *cmd; 6299 IOCB_t *cmd;
6229 6300
6230repeat: 6301repeat:
@@ -6248,7 +6319,7 @@ repeat:
6248 "Fabric sched1: ste:x%x", 6319 "Fabric sched1: ste:x%x",
6249 iocb->vport->port_state, 0, 0); 6320 iocb->vport->port_state, 0, 0);
6250 6321
6251 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6322 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6252 6323
6253 if (ret == IOCB_ERROR) { 6324 if (ret == IOCB_ERROR) {
6254 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6325 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
6394lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 6465lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6395{ 6466{
6396 unsigned long iflags; 6467 unsigned long iflags;
6397 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6398 int ready; 6468 int ready;
6399 int ret; 6469 int ret;
6400 6470
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6418 "Fabric sched2: ste:x%x", 6488 "Fabric sched2: ste:x%x",
6419 iocb->vport->port_state, 0, 0); 6489 iocb->vport->port_state, 0, 0);
6420 6490
6421 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 6491 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6422 6492
6423 if (ret == IOCB_ERROR) { 6493 if (ret == IOCB_ERROR) {
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6494 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6595 IOERR_SLI_ABORTED);
6526} 6596}
6597
6598/**
6599 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6600 * @phba: pointer to lpfc hba data structure.
6601 * @axri: pointer to the els xri abort wcqe structure.
6602 *
6603 * This routine is invoked by the worker thread to process a SLI4 slow-path
6604 * ELS aborted xri.
6605 **/
6606void
6607lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6608 struct sli4_wcqe_xri_aborted *axri)
6609{
6610 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6611 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6612 unsigned long iflag = 0;
6613
6614 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6615 list_for_each_entry_safe(sglq_entry, sglq_next,
6616 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6617 if (sglq_entry->sli4_xritag == xri) {
6618 list_del(&sglq_entry->list);
6619 spin_unlock_irqrestore(
6620 &phba->sli4_hba.abts_sgl_list_lock,
6621 iflag);
6622 spin_lock_irqsave(&phba->hbalock, iflag);
6623
6624 list_add_tail(&sglq_entry->list,
6625 &phba->sli4_hba.lpfc_sgl_list);
6626 spin_unlock_irqrestore(&phba->hbalock, iflag);
6627 return;
6628 }
6629 }
6630 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6631}
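
A note on the new lpfc_sli4_els_xri_aborted() handler above: it searches the aborted-ELS sgl list under abts_sgl_list_lock, unlinks the matching entry, and only then takes hbalock to put the entry back on the free sgl list, so the two locks are never held at once. A minimal standalone sketch of that "find, unlink, re-queue under a different lock" pattern follows; struct item, find_and_requeue(), src_lock and dst_lock are invented names for illustration and are not part of the patch.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
        struct list_head list;
        u16 tag;
};

/* Sketch only: unlink the entry whose tag matches under src_lock, then
 * re-add it to the destination list under dst_lock, never holding both. */
static bool find_and_requeue(struct list_head *src, spinlock_t *src_lock,
                             struct list_head *dst, spinlock_t *dst_lock,
                             u16 tag)
{
        struct item *pos, *next;
        unsigned long flags;

        spin_lock_irqsave(src_lock, flags);
        list_for_each_entry_safe(pos, next, src, list) {
                if (pos->tag != tag)
                        continue;
                list_del(&pos->list);
                spin_unlock_irqrestore(src_lock, flags);

                spin_lock_irqsave(dst_lock, flags);
                list_add_tail(&pos->list, dst);
                spin_unlock_irqrestore(dst_lock, flags);
                return true;
        }
        spin_unlock_irqrestore(src_lock, flags);
        return false;
}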
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf70..35c41ae75be 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,10 +29,12 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 33#include "lpfc_hw.h"
33#include "lpfc_nl.h" 34#include "lpfc_nl.h"
34#include "lpfc_disc.h" 35#include "lpfc_disc.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_scsi.h" 38#include "lpfc_scsi.h"
37#include "lpfc.h" 39#include "lpfc.h"
38#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
273 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
274 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
275 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
278
279 lpfc_unregister_unused_fcf(phba);
276} 280}
277 281
278/** 282/**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
295 299
296 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
297 GFP_ATOMIC); 301 GFP_ATOMIC);
298 if (ret) 302 if (ret) {
299 atomic_inc(&phba->fast_event_count); 303 atomic_inc(&phba->fast_event_count);
300 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
301 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
306 }
302 return ret; 307 return ret;
303} 308}
304 309
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
491 phba->work_ha = 0; 496 phba->work_ha = 0;
492 spin_unlock_irq(&phba->hbalock); 497 spin_unlock_irq(&phba->hbalock);
493 498
499 /* First, try to post the next mailbox command to SLI4 device */
500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
501 lpfc_sli4_post_async_mbox(phba);
502
494 if (ha_copy & HA_ERATT) 503 if (ha_copy & HA_ERATT)
495 /* Handle the error attention event */ 504 /* Handle the error attention event */
496 lpfc_handle_eratt(phba); 505 lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
1024 * lpfc_fab_name_match - Check if the fcf fabric names match.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
1028 * This routine compares the fcf record's fabric name with the provided
1029 * fabric name. If the fabric names are identical this function
1030 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
1056/**
1057 * lpfc_mac_addr_match - Check if the fcf mac addresses match.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
1061 * This routine compares the fcf record's mac address with the HBA's
1062 * FCF mac address. If the mac addresses are identical this function
1063 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to the lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
1129 * lpfc_register_fcf - Register the FCF with hba.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
1133 * the fcf with the HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
1144 /* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
1179 * @boot_flag: Indicates if this record is used by the boot bios.
1180 * @addr_mode: The address mode to be used by this FCF
1181 *
1182 * This routine compares the fcf record with the connect list obtained from
1183 * the config region to decide if this FCF can be used for SAN discovery. It
1184 * returns 1 if this record can be used for SAN discovery, else it returns
1185 * zero. If this FCF record can be used for SAN discovery, boot_flag will
1186 * indicate if this FCF is used by the boot bios and addr_mode will indicate
1187 * the addressing mode to be used for this FCF when the function returns.
1188 * If the FCF record needs to be used with a particular vlan id, the vlan is
1189 * set in vlan_id on return of the function. If no VLAN tagging needs to
1190 * be used with the FCF, vlan_id will be set to 0xFFFF.
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1203 new_fcf_record);
1204 if (phba->valid_vlan)
1205 *vlan_id = phba->vlan_id;
1206 else
1207 *vlan_id = 0xFFFF;
1208 return 1;
1209 }
1210
1211 /*
1212 * If there are no FCF connection table entries, the driver connects to
1213 * all FCFs.
1214 */
1215 if (list_empty(&phba->fcf_conn_rec_list)) {
1216 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record);
1219 *vlan_id = 0xFFFF;
1220 return 1;
1221 }
1222
1223 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1224 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1225 continue;
1226
1227 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1228 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1229 new_fcf_record))
1230 continue;
1231
1232 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1233 /*
1234 * If the vlan bit map does not have the bit set for the
1235 * vlan id to be used, then it is not a match.
1236 */
1237 if (!(new_fcf_record->vlan_bitmap
1238 [conn_entry->conn_rec.vlan_tag / 8] &
1239 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1240 continue;
1241 }
1242
1243 /*
1244 * Check if the connection record specifies a required
1245 * addressing mode.
1246 */
1247 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1248 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1249
1250 /*
1251 * If SPMA is required but the FCF does not support it, continue.
1252 */
1253 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1254 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1255 new_fcf_record) & LPFC_FCF_SPMA))
1256 continue;
1257
1258 /*
1259 * If FPMA is required but the FCF does not support it, continue.
1260 */
1261 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1262 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1263 new_fcf_record) & LPFC_FCF_FPMA))
1264 continue;
1265 }
1266
1267 /*
1268 * This fcf record matches filtering criteria.
1269 */
1270 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1271 *boot_flag = 1;
1272 else
1273 *boot_flag = 0;
1274
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record);
1277 /*
1278 * If the user specified a required address mode, assign that
1279 * address mode
1280 */
1281 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1282 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1283 *addr_mode = (conn_entry->conn_rec.flags &
1284 FCFCNCT_AM_SPMA) ?
1285 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1286 /*
1287 * If the user specified a preferred address mode, use that
1288 * mode only if the FCF supports it.
1289 */
1290 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1291 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1292 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1293 (*addr_mode & LPFC_FCF_SPMA))
1294 *addr_mode = LPFC_FCF_SPMA;
1295 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1296 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301 * If the user did not specify any addressing mode, use FPMA if
1302 * possible, else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag;
1309 else
1310 *vlan_id = 0xFFFF;
1311
1312 return 1;
1313 }
1314
1315 return 0;
1316}
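
The VLAN check inside lpfc_match_fcf_conn_list() above treats vlan_bitmap as one bit per VLAN id, eight ids packed per byte. A standalone sketch of that test follows; vlan_allowed() is an illustrative name, not a driver function.

#include <linux/types.h>

/* Sketch: return true if the bit for vlan_tag is set in the bitmap.
 * One bit per VLAN id, eight ids per byte, matching the
 * vlan_bitmap[tag / 8] & (1 << (tag % 8)) test above. */
static bool vlan_allowed(const u8 *bitmap, u16 vlan_tag)
{
        return bitmap[vlan_tag / 8] & (1 << (vlan_tag % 8));
}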
1317
1318/**
1319 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1320 * @phba: pointer to lpfc hba data structure.
1321 * @mboxq: pointer to mailbox object.
1322 *
1323 * This function iterates through all the fcf records available in the
1324 * HBA and chooses the optimal FCF record for discovery. After finding
1325 * the FCF for discovery it registers the FCF record and kick-starts
1326 * discovery.
1327 * If the FCF_IN_USE flag is set in the currently used FCF, the routine
1328 * tries to use an FCF record which matches the fabric name and mac address
1329 * of the currently used FCF record.
1330 * If the driver supports only one FCF, it will try to use the FCF record
1331 * used by the BOOT_BIOS.
1332 */
1333void
1334lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1335{
1336 void *virt_addr;
1337 dma_addr_t phys_addr;
1338 uint8_t *bytep;
1339 struct lpfc_mbx_sge sge;
1340 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1341 uint32_t shdr_status, shdr_add_status;
1342 union lpfc_sli4_cfg_shdr *shdr;
1343 struct fcf_record *new_fcf_record;
1344 int rc;
1345 uint32_t boot_flag, addr_mode;
1346 uint32_t next_fcf_index;
1347 unsigned long flags;
1348 uint16_t vlan_id;
1349
1350 /* Get the first SGE entry from the non-embedded DMA memory. This
1351 * routine only uses a single SGE.
1352 */
1353 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1354 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1355 if (unlikely(!mboxq->sge_array)) {
1356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1357 "2524 Failed to get the non-embedded SGE "
1358 "virtual address\n");
1359 goto out;
1360 }
1361 virt_addr = mboxq->sge_array->addr[0];
1362
1363 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1364 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1365 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1366 &shdr->response);
1367 /*
1368 * The FCF Record was read and there is no reason for the driver
1369 * to maintain the FCF record data or memory. Instead, just keep
1370 * track of which FCFIs can be used.
1371 */
1372 if (shdr_status || shdr_add_status) {
1373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1374 "2521 READ_FCF_RECORD mailbox failed "
1375 "with status x%x add_status x%x, mbx\n",
1376 shdr_status, shdr_add_status);
1377 goto out;
1378 }
1379 /* Interpreting the returned information of FCF records */
1380 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1381 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1382 sizeof(struct lpfc_mbx_read_fcf_tbl));
1383 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1384
1385 new_fcf_record = (struct fcf_record *)(virt_addr +
1386 sizeof(struct lpfc_mbx_read_fcf_tbl));
1387 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1388 sizeof(struct fcf_record));
1389 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1390
1391 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1392 &boot_flag, &addr_mode,
1393 &vlan_id);
1394 /*
1395 * If the fcf record does not match the connect list entries,
1396 * read the next entry.
1397 */
1398 if (!rc)
1399 goto read_next_fcf;
1400 /*
1401 * If this is not the first FCF discovery of the HBA, use the last
1402 * FCF record for the discovery.
1403 */
1404 spin_lock_irqsave(&phba->hbalock, flags);
1405 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1406 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1407 new_fcf_record) &&
1408 lpfc_mac_addr_match(phba, new_fcf_record)) {
1409 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1410 spin_unlock_irqrestore(&phba->hbalock, flags);
1411 goto out;
1412 }
1413 spin_unlock_irqrestore(&phba->hbalock, flags);
1414 goto read_next_fcf;
1415 }
1416 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1417 /*
1418 * If the current FCF record does not have boot flag
1419 * set and new fcf record has boot flag set, use the
1420 * new fcf record.
1421 */
1422 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1423 /* Use this FCF record */
1424 lpfc_copy_fcf_record(phba, new_fcf_record);
1425 phba->fcf.addr_mode = addr_mode;
1426 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1427 if (vlan_id != 0xFFFF) {
1428 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1429 phba->fcf.vlan_id = vlan_id;
1430 }
1431 spin_unlock_irqrestore(&phba->hbalock, flags);
1432 goto read_next_fcf;
1433 }
1434 /*
1435 * If the current FCF record has boot flag set and the
1436 * new FCF record does not have boot flag, read the next
1437 * FCF record.
1438 */
1439 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1440 spin_unlock_irqrestore(&phba->hbalock, flags);
1441 goto read_next_fcf;
1442 }
1443 /*
1444 * If there is a record with lower priority value for
1445 * the current FCF, use that record.
1446 */
1447 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1448 && (new_fcf_record->fip_priority <
1449 phba->fcf.priority)) {
1450 /* Use this FCF record */
1451 lpfc_copy_fcf_record(phba, new_fcf_record);
1452 phba->fcf.addr_mode = addr_mode;
1453 if (vlan_id != 0xFFFF) {
1454 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1455 phba->fcf.vlan_id = vlan_id;
1456 }
1457 spin_unlock_irqrestore(&phba->hbalock, flags);
1458 goto read_next_fcf;
1459 }
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * This is the first available FCF record, use this
1465 * record.
1466 */
1467 lpfc_copy_fcf_record(phba, new_fcf_record);
1468 phba->fcf.addr_mode = addr_mode;
1469 if (boot_flag)
1470 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1471 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1472 if (vlan_id != 0xFFFF) {
1473 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1474 phba->fcf.vlan_id = vlan_id;
1475 }
1476 spin_unlock_irqrestore(&phba->hbalock, flags);
1477 goto read_next_fcf;
1478
1479read_next_fcf:
1480 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1481 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1482 lpfc_register_fcf(phba);
1483 else
1484 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1485 return;
1486
1487out:
1488 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1489 lpfc_register_fcf(phba);
1490
1491 return;
1492}
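
The selection logic in lpfc_mbx_cmpl_read_fcf_record() above is spread across several goto paths, but when a usable FCF is already cached it reduces to a simple preference rule: a record carrying the boot flag wins, and otherwise (for the same fabric) a numerically lower fip_priority wins. A hedged, purely illustrative condensation of that rule follows; struct fcf_candidate and fcf_prefer_new() are invented names and a simplification, not driver code.

#include <linux/types.h>

struct fcf_candidate {
        bool boot;      /* record is used by the boot bios */
        u8   priority;  /* fip_priority; lower value is better */
};

/* Sketch: should the newly read candidate replace the current one? */
static bool fcf_prefer_new(const struct fcf_candidate *cur,
                           const struct fcf_candidate *cand)
{
        if (cand->boot && !cur->boot)
                return true;            /* new record is the boot FCF */
        if (!cand->boot && cur->boot)
                return false;           /* keep the boot FCF already found */
        return cand->priority < cur->priority;
}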
1493
1494/**
1495 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This function loops through the list of vports on the @phba and issues an
1499 * FDISC if possible.
1500 */
1501void
1502lpfc_start_fdiscs(struct lpfc_hba *phba)
1503{
1504 struct lpfc_vport **vports;
1505 int i;
1506
1507 vports = lpfc_create_vport_work_array(phba);
1508 if (vports != NULL) {
1509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1510 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1511 continue;
1512 /* There is no vpi for this vport */
1513 if (vports[i]->vpi > phba->max_vpi) {
1514 lpfc_vport_set_state(vports[i],
1515 FC_VPORT_FAILED);
1516 continue;
1517 }
1518 if (phba->fc_topology == TOPOLOGY_LOOP) {
1519 lpfc_vport_set_state(vports[i],
1520 FC_VPORT_LINKDOWN);
1521 continue;
1522 }
1523 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1524 lpfc_initial_fdisc(vports[i]);
1525 else {
1526 lpfc_vport_set_state(vports[i],
1527 FC_VPORT_NO_FABRIC_SUPP);
1528 lpfc_printf_vlog(vports[i], KERN_ERR,
1529 LOG_ELS,
1530 "0259 No NPIV "
1531 "Fabric support\n");
1532 }
1533 }
1534 }
1535 lpfc_destroy_vport_work_array(phba, vports);
1536}
1537
1538void
1539lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1540{
1541 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1542 struct lpfc_vport *vport = mboxq->vport;
1543
1544 if (mboxq->u.mb.mbxStatus) {
1545 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1546 "2018 REG_VFI mbxStatus error x%x "
1547 "HBA state x%x\n",
1548 mboxq->u.mb.mbxStatus, vport->port_state);
1549 if (phba->fc_topology == TOPOLOGY_LOOP) {
1550 /* FLOGI failed, use loop map to make discovery list */
1551 lpfc_disc_list_loopmap(vport);
1552 /* Start discovery */
1553 lpfc_disc_start(vport);
1554 goto fail_free_mem;
1555 }
1556 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1557 goto fail_free_mem;
1558 }
1559 /* Mark that the vport has registered with its VFI */
1560 vport->vfi_state |= LPFC_VFI_REGISTERED;
1561
1562 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1563 lpfc_start_fdiscs(phba);
1564 lpfc_do_scr_ns_plogi(phba, vport);
1565 }
1566
1567fail_free_mem:
1568 mempool_free(mboxq, phba->mbox_mem_pool);
1569 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1570 kfree(dmabuf);
1571 return;
1572}
1573
1574static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1575lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1576{
964 MAILBOX_t *mb = &pmb->mb; 1577 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1578 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1579 struct lpfc_vport *vport = pmb->vport;
967 1580
@@ -1012,13 +1625,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1625lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1626{
1014 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1628 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1629 int i;
1017 struct lpfc_dmabuf *mp; 1630 struct lpfc_dmabuf *mp;
1018 int rc; 1631 int rc;
1632 struct fcf_record *fcf_record;
1019 1633
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1634 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1635
1023 spin_lock_irq(&phba->hbalock); 1636 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1637 switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1647 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1648 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1649 break;
1650 case LA_10GHZ_LINK:
1651 phba->fc_linkspeed = LA_10GHZ_LINK;
1652 break;
1037 default: 1653 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1654 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1655 break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1731 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1732 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1733 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1734 goto out;
1121 } 1735 }
1122 } 1736 }
1123 1737
1124 if (cfglink_mbox) { 1738 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1739 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 if (!cfglink_mbox)
1741 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1742 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1743 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1744 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1745 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1746 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1747 if (rc == MBX_NOT_FINISHED) {
1131 return; 1748 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1749 goto out;
1750 }
1751 } else {
1752 /*
1753 * Add the driver's default FCF record at FCF index 0 now. This
1754 * is phase 1 implementation that support FCF index 0 and driver
1755 * defaults.
1756 */
1757 if (phba->cfg_enable_fip == 0) {
1758 fcf_record = kzalloc(sizeof(struct fcf_record),
1759 GFP_KERNEL);
1760 if (unlikely(!fcf_record)) {
1761 lpfc_printf_log(phba, KERN_ERR,
1762 LOG_MBOX | LOG_SLI,
1763 "2554 Could not allocate memmory for "
1764 "fcf record\n");
1765 rc = -ENODEV;
1766 goto out;
1767 }
1768
1769 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1770 LPFC_FCOE_FCF_DEF_INDEX);
1771 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1772 if (unlikely(rc)) {
1773 lpfc_printf_log(phba, KERN_ERR,
1774 LOG_MBOX | LOG_SLI,
1775 "2013 Could not manually add FCF "
1776 "record 0, status %d\n", rc);
1777 rc = -ENODEV;
1778 kfree(fcf_record);
1779 goto out;
1780 }
1781 kfree(fcf_record);
1782 }
1783 /*
1784 * The driver is expected to do FIP/FCF. Call the port
1785 * and get the FCF Table.
1786 */
1787 rc = lpfc_sli4_read_fcf_record(phba,
1788 LPFC_FCOE_FCF_GET_FIRST);
1789 if (rc)
1790 goto out;
1133 } 1791 }
1792
1793 return;
1134out: 1794out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1795 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1796 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1807 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1808 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1809 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1810 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1811 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1812 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1813 writel(control, phba->HCregaddr);
1814 readl(phba->HCregaddr); /* flush */
1815 }
1154 spin_unlock_irq(&phba->hbalock); 1816 spin_unlock_irq(&phba->hbalock);
1155} 1817}
1156 1818
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1821{
1160 lpfc_linkdown(phba); 1822 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1823 lpfc_enable_la(phba);
1824 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1825 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1826}
1164 1827
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1838 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1840 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1841 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1842 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1843
1181 /* Unblock ELS traffic */ 1844 /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1853 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1854 }
1192 1855
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1856 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1857
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1858 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1859
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 1991static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1992lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 1993{
1331 MAILBOX_t *mb = &pmb->mb; 1994 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 1995 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 1997
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2044{
1382 struct lpfc_vport *vport = pmb->vport; 2045 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2047 MAILBOX_t *mb = &pmb->u.mb;
1385 2048
1386 switch (mb->mbxStatus) { 2049 switch (mb->mbxStatus) {
1387 case 0x0011: 2050 case 0x0011:
@@ -1416,6 +2079,128 @@ out:
1416 return; 2079 return;
1417} 2080}
1418 2081
2082/**
2083 * lpfc_create_static_vport - Read HBA config region to create static vports.
2084 * @phba: pointer to lpfc hba data structure.
2085 *
2086 * This routine issues a DUMP mailbox command for config region 22 to get
2087 * the list of static vports to be created. The function creates vports
2088 * based on the information returned from the HBA.
2089 **/
2090void
2091lpfc_create_static_vport(struct lpfc_hba *phba)
2092{
2093 LPFC_MBOXQ_t *pmb = NULL;
2094 MAILBOX_t *mb;
2095 struct static_vport_info *vport_info;
2096 int rc, i;
2097 struct fc_vport_identifiers vport_id;
2098 struct fc_vport *new_fc_vport;
2099 struct Scsi_Host *shost;
2100 struct lpfc_vport *vport;
2101 uint16_t offset = 0;
2102 uint8_t *vport_buff;
2103
2104 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2105 if (!pmb) {
2106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2107 "0542 lpfc_create_static_vport failed to"
2108 " allocate mailbox memory\n");
2109 return;
2110 }
2111
2112 mb = &pmb->u.mb;
2113
2114 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2115 if (!vport_info) {
2116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2117 "0543 lpfc_create_static_vport failed to"
2118 " allocate vport_info\n");
2119 mempool_free(pmb, phba->mbox_mem_pool);
2120 return;
2121 }
2122
2123 vport_buff = (uint8_t *) vport_info;
2124 do {
2125 lpfc_dump_static_vport(phba, pmb, offset);
2126 pmb->vport = phba->pport;
2127 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2128
2129 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2130 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2131 "0544 lpfc_create_static_vport failed to"
2132 " issue dump mailbox command ret 0x%x "
2133 "status 0x%x\n",
2134 rc, mb->mbxStatus);
2135 goto out;
2136 }
2137
2138 if (mb->un.varDmp.word_cnt >
2139 sizeof(struct static_vport_info) - offset)
2140 mb->un.varDmp.word_cnt =
2141 sizeof(struct static_vport_info) - offset;
2142
2143 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2144 vport_buff + offset,
2145 mb->un.varDmp.word_cnt);
2146 offset += mb->un.varDmp.word_cnt;
2147
2148 } while (mb->un.varDmp.word_cnt &&
2149 offset < sizeof(struct static_vport_info));
2150
2151
2152 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2153 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2154 != VPORT_INFO_REV)) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2156 "0545 lpfc_create_static_vport bad"
2157 " information header 0x%x 0x%x\n",
2158 le32_to_cpu(vport_info->signature),
2159 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2160
2161 goto out;
2162 }
2163
2164 shost = lpfc_shost_from_vport(phba->pport);
2165
2166 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2167 memset(&vport_id, 0, sizeof(vport_id));
2168 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2169 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2170 if (!vport_id.port_name || !vport_id.node_name)
2171 continue;
2172
2173 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2174 vport_id.vport_type = FC_PORTTYPE_NPIV;
2175 vport_id.disable = false;
2176 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2177
2178 if (!new_fc_vport) {
2179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2180 "0546 lpfc_create_static_vport failed to"
2181 " create vport \n");
2182 continue;
2183 }
2184
2185 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2186 vport->vport_flag |= STATIC_VPORT;
2187 }
2188
2189out:
2190 /*
2191 * If this is a timed-out command, setting context2 to NULL tells the
2192 * SLI layer not to use this buffer.
2193 */
2194 spin_lock_irq(&phba->hbalock);
2195 pmb->context2 = NULL;
2196 spin_unlock_irq(&phba->hbalock);
2197 kfree(vport_info);
2198 if (rc != MBX_TIMEOUT)
2199 mempool_free(pmb, phba->mbox_mem_pool);
2200
2201 return;
2202}
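
lpfc_create_static_vport() above converts the on-flash WWPN/WWNN bytes with wwn_to_u64() before handing them to fc_vport_create(). A minimal sketch of what that conversion amounts to follows; the helper name wwn_bytes_to_u64() is made up for illustration, while the FC transport's wwn_to_u64() performs the equivalent big-endian fold.

#include <linux/types.h>

/* Sketch: fold an 8-byte, big-endian world wide name into a u64,
 * most significant byte first. */
static u64 wwn_bytes_to_u64(const u8 wwn[8])
{
        u64 v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | wwn[i];
        return v;
}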
2203
1419/* 2204/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2205 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2206 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2211lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2212{
1428 struct lpfc_vport *vport = pmb->vport; 2213 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2214 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2215 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2216 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2217
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2218 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2219 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2220 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2221 if (mb->mbxStatus) {
2222 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2223 "0258 Register Fabric login error: 0x%x\n",
2224 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2226 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2227 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2240 }
1455 2241
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2243 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2244 * to the ndlp are done.
1462 */ 2245 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2248 }
1466 2249
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2250 ndlp->nlp_rpi = mb->un.varWords[0];
2251 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2252 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2254
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2255 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2256 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2257 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2258 }
1498 2259
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2277void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2278lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2279{
1519 MAILBOX_t *mb = &pmb->mb; 2280 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2281 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2282 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2283 struct lpfc_vport *vport = pmb->vport;
1523 2284
1524 if (mb->mbxStatus) { 2285 if (mb->mbxStatus) {
1525out: 2286out:
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2288 "0260 Register NameServer error: 0x%x\n",
2289 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2290 /* decrement the node reference count held for this
1527 * callback function. 2291 * callback function.
1528 */ 2292 */
@@ -1546,15 +2310,13 @@ out:
1546 return; 2310 return;
1547 } 2311 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2313 return;
1553 } 2314 }
1554 2315
1555 pmb->context1 = NULL; 2316 pmb->context1 = NULL;
1556 2317
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2318 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2320 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2321 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2322
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2817 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2818 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2819 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2820 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2821 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2822 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2823 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2864 */
2103 psli = &phba->sli; 2865 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2866 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2867 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2868 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2869 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2870 pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2912 LPFC_MBOXQ_t *mbox;
2151 int rc; 2913 int rc;
2152 2914
2153 if (ndlp->nlp_rpi) { 2915 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2917 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2918 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2924 }
2163 lpfc_no_rpi(phba, ndlp); 2925 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2926 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2165 return 1; 2928 return 1;
2166 } 2929 }
2167 return 0; 2930 return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3015
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3016 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3017 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3018 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3019 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3020 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3021 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3024
2262 spin_lock_irq(&phba->hbalock); 3025 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3026 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3027 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3028 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3029 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3030 if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3072 int rc;
2310 3073
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3074 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3075 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3076 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3077 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3078 * allocated by the firmware.
2315 */ 3079 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3080 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3081 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3082 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3083 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3084 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3085 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3317 * clear_la then don't send it.
2554 */ 3318 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3319 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3320 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3321 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3322 return;
2558 3323
2559 /* Link up discovery */ 3324 /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3347
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3348 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3349 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3350 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3351 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3352 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3353 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3407 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3408 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3409 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3410 !(vport->fc_flag & FC_RSCN_MODE) &&
3411 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3412 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3413 return;
2648 } 3414 }
@@ -2919,11 +3685,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3685 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3686 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3687 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3688 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3690 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3691 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3692 lpfc_issue_clear_la(phba, vport);
3693 vport->port_state = LPFC_VPORT_READY;
3694 }
2927 } 3695 }
2928 3696
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3697 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3707 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3708 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3709 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3710 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3711 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3712 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3713 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3727 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3728 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3729 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3730 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3732 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3733 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3734 lpfc_issue_clear_la(phba, vport);
3735 vport->port_state = LPFC_VPORT_READY;
3736 }
2967 } 3737 }
2968 break; 3738 break;
2969 3739
@@ -3036,7 +3806,7 @@ restart_disc:
3036void 3806void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3807lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3808{
3039 MAILBOX_t *mb = &pmb->mb; 3809 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3810 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3812 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3814 pmb->context1 = NULL;
3045 3815
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3816 ndlp->nlp_rpi = mb->un.varWords[0];
3817 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3818 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3819 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3820
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4068 return 1;
3298 return 0; 4069 return 0;
3299} 4070}
4071
4072/**
4073 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4074 * @phba: Pointer to hba context object.
4075 *
4076 * This function iterates through all FC nodes associated
4077 * with all vports to check whether any node has an
4078 * fc_rport associated with it. If there is an fc_rport
4079 * associated with the node, then the node is either in
4080 * discovered state or its devloss_timer is pending.
4081 */
4082static int
4083lpfc_fcf_inuse(struct lpfc_hba *phba)
4084{
4085 struct lpfc_vport **vports;
4086 int i, ret = 0;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089
4090 vports = lpfc_create_vport_work_array(phba);
4091
4092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4093 shost = lpfc_shost_from_vport(vports[i]);
4094 spin_lock_irq(shost->host_lock);
4095 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4096 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4097 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4098 ret = 1;
4099 spin_unlock_irq(shost->host_lock);
4100 goto out;
4101 }
4102 }
4103 spin_unlock_irq(shost->host_lock);
4104 }
4105out:
4106 lpfc_destroy_vport_work_array(phba, vports);
4107 return ret;
4108}
4109
4110/**
4111 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4112 * @phba: Pointer to hba context object.
4113 * @mboxq: Pointer to mailbox object.
4114 *
4115 * This function frees memory associated with the mailbox command.
4116 */
4117static void
4118lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4119{
4120 struct lpfc_vport *vport = mboxq->vport;
4121
4122 if (mboxq->u.mb.mbxStatus) {
4123 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4124 "2555 UNREG_VFI mbxStatus error x%x "
4125 "HBA state x%x\n",
4126 mboxq->u.mb.mbxStatus, vport->port_state);
4127 }
4128 mempool_free(mboxq, phba->mbox_mem_pool);
4129 return;
4130}
4131
4132/**
4133 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4134 * @phba: Pointer to hba context object.
4135 * @mboxq: Pointer to mailbox object.
4136 *
4137 * This function frees memory associated with the mailbox command.
4138 */
4139static void
4140lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4141{
4142 struct lpfc_vport *vport = mboxq->vport;
4143
4144 if (mboxq->u.mb.mbxStatus) {
4145 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4146 "2550 UNREG_FCFI mbxStatus error x%x "
4147 "HBA state x%x\n",
4148 mboxq->u.mb.mbxStatus, vport->port_state);
4149 }
4150 mempool_free(mboxq, phba->mbox_mem_pool);
4151 return;
4152}
4153
4154/**
4155 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4156 * @phba: Pointer to hba context object.
4157 *
4158 * This function checks whether any remote ports are still connected through
4159 * the FCF. If all the devices are disconnected, it unregisters the FCFI
4160 * and also tries to use another FCF for discovery.
4161 */
4162void
4163lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4164{
4165 LPFC_MBOXQ_t *mbox;
4166 int rc;
4167 struct lpfc_vport **vports;
4168 int i;
4169
4170 spin_lock_irq(&phba->hbalock);
4171 /*
4172 * If the HBA is not running in FIP mode, or
4173 * if the HBA does not support FCoE, or
4174 * if no FCF is registered,
4175 * do nothing.
4176 */
4177 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4178 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4179 (phba->cfg_enable_fip == 0)) {
4180 spin_unlock_irq(&phba->hbalock);
4181 return;
4182 }
4183 spin_unlock_irq(&phba->hbalock);
4184
4185 if (lpfc_fcf_inuse(phba))
4186 return;
4187
4188
4189 /* Unregister VPIs */
4190 vports = lpfc_create_vport_work_array(phba);
4191 if (vports &&
4192 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4193 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4194 lpfc_mbx_unreg_vpi(vports[i]);
4195 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4197 }
4198 lpfc_destroy_vport_work_array(phba, vports);
4199
4200 /* Unregister VFI */
4201 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4202 if (!mbox) {
4203 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4204 "2556 UNREG_VFI mbox allocation failed "
4205 "HBA state x%x\n",
4206 phba->pport->port_state);
4207 return;
4208 }
4209
4210 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4211 mbox->vport = phba->pport;
4212 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4213
4214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4215 if (rc == MBX_NOT_FINISHED) {
4216 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4217 "2557 UNREG_VFI issue mbox failed rc x%x "
4218 "HBA state x%x\n",
4219 rc, phba->pport->port_state);
4220 mempool_free(mbox, phba->mbox_mem_pool);
4221 return;
4222 }
4223
4224 /* Unregister FCF */
4225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4226 if (!mbox) {
4227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4228 "2551 UNREG_FCFI mbox allocation failed "
4229 "HBA state x%x\n",
4230 phba->pport->port_state);
4231 return;
4232 }
4233
4234 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4235 mbox->vport = phba->pport;
4236 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4238
4239 if (rc == MBX_NOT_FINISHED) {
4240 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4241 "2552 UNREG_FCFI issue mbox failed rc x%x "
4242 "HBA state x%x\n",
4243 rc, phba->pport->port_state);
4244 mempool_free(mbox, phba->mbox_mem_pool);
4245 return;
4246 }
4247
4248 spin_lock_irq(&phba->hbalock);
4249 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4250 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4251 FCF_VALID_VLAN);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 /*
4255 * If driver is not unloading, check if there is any other
4256 * FCF record that can be used for discovery.
4257 */
4258 if ((phba->pport->load_flag & FC_UNLOADING) ||
4259 (phba->link_state < LPFC_LINK_UP))
4260 return;
4261
4262 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4263
4264 if (rc)
4265 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4266 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4267 " record HBA state x%x\n",
4268 phba->pport->port_state);
4269}
4270
4271/**
4272 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4273 * @phba: Pointer to hba context object.
4274 * @buff: Buffer containing the FCF connection table as in the config
4275 * region.
4276 * This function creates the driver data structures for the FCF connection
4277 * record table read from config region 23.
4278 */
4279static void
4280lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4281 uint8_t *buff)
4282{
4283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4284 struct lpfc_fcf_conn_hdr *conn_hdr;
4285 struct lpfc_fcf_conn_rec *conn_rec;
4286 uint32_t record_count;
4287 int i;
4288
4289 /* Free the current connect table */
4290 list_for_each_entry_safe(conn_entry, next_conn_entry,
4291 &phba->fcf_conn_rec_list, list)
4292 kfree(conn_entry);
4293
4294 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4295 record_count = conn_hdr->length * sizeof(uint32_t)/
4296 sizeof(struct lpfc_fcf_conn_rec);
4297
4298 conn_rec = (struct lpfc_fcf_conn_rec *)
4299 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4300
4301 for (i = 0; i < record_count; i++) {
4302 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4303 continue;
4304 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4305 GFP_KERNEL);
4306 if (!conn_entry) {
4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4308 "2566 Failed to allocate connection"
4309 " table entry\n");
4310 return;
4311 }
4312
4313 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4314 sizeof(struct lpfc_fcf_conn_rec));
4315 conn_entry->conn_rec.vlan_tag =
4316 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4317 conn_entry->conn_rec.flags =
4318 le16_to_cpu(conn_entry->conn_rec.flags);
4319 list_add_tail(&conn_entry->list,
4320 &phba->fcf_conn_rec_list);
4321 }
4322}
4323
4324/**
4325 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4326 * @phba: Pointer to hba context object.
4327 * @buff: Buffer containing the FCoE parameter data structure.
4328 *
4329 * This function updates the driver data structures with the config
4330 * parameters read from config region 23.
4331 */
4332static void
4333lpfc_read_fcoe_param(struct lpfc_hba *phba,
4334 uint8_t *buff)
4335{
4336 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4337 struct lpfc_fcoe_params *fcoe_param;
4338
4339 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4340 buff;
4341 fcoe_param = (struct lpfc_fcoe_params *)
4342 buff + sizeof(struct lpfc_fip_param_hdr);
4343
4344 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4345 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4346 return;
4347
4348 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4349 FIPP_MODE_ON)
4350 phba->cfg_enable_fip = 1;
4351
4352 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4353 FIPP_MODE_OFF)
4354 phba->cfg_enable_fip = 0;
4355
4356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4357 phba->valid_vlan = 1;
4358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4359 0xFFF;
4360 }
4361
4362 phba->fc_map[0] = fcoe_param->fc_map[0];
4363 phba->fc_map[1] = fcoe_param->fc_map[1];
4364 phba->fc_map[2] = fcoe_param->fc_map[2];
4365 return;
4366}
4367
4368/**
4369 * lpfc_get_rec_conf23 - Get a record type in config region data.
4370 * @buff: Buffer containing config region 23 data.
4371 * @size: Size of the data buffer.
4372 * @rec_type: Record type to be searched.
4373 *
4374 * This function searches the config region data to find the beginning
4375 * of the record specified by rec_type. If the record is found, this
4376 * function returns a pointer to the record; otherwise it returns NULL.
4377 */
4378static uint8_t *
4379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4380{
4381 uint32_t offset = 0, rec_length;
4382
4383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4384 (size < sizeof(uint32_t)))
4385 return NULL;
4386
4387 rec_length = buff[offset + 1];
4388
4389 /*
4390 * One TLV record has one word header and number of data words
4391 * specified in the rec_length field of the record header.
4392 */
4393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4394 <= size) {
4395 if (buff[offset] == rec_type)
4396 return &buff[offset];
4397
4398 if (buff[offset] == LPFC_REGION23_LAST_REC)
4399 return NULL;
4400
4401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4402 rec_length = buff[offset + 1];
4403 }
4404 return NULL;
4405}
4406
4407/**
4408 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4409 * @phba: Pointer to lpfc_hba data structure.
4410 * @buff: Buffer containing config region 23 data.
4411 * @size: Size of the data buffer.
4412 *
4413 * This function parses the FCoE config parameters in config region 23 and
4414 * populates the driver data structures with the parameters.
4415 */
4416void
4417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4418 uint8_t *buff,
4419 uint32_t size)
4420{
4421 uint32_t offset = 0, rec_length;
4422 uint8_t *rec_ptr;
4423
4424 /*
4425 * If the data size is less than 2 words, the signature and version
4426 * cannot be verified.
4427 */
4428 if (size < 2*sizeof(uint32_t))
4429 return;
4430
4431 /* Check the region signature first */
4432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2567 Config region 23 has bad signature\n");
4435 return;
4436 }
4437
4438 offset += 4;
4439
4440 /* Check the data structure version */
4441 if (buff[offset] != LPFC_REGION23_VERSION) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "2568 Config region 23 has bad version\n");
4444 return;
4445 }
4446 offset += 4;
4447
4448 rec_length = buff[offset + 1];
4449
4450 /* Read FCoE param record */
4451 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4452 size - offset, FCOE_PARAM_TYPE);
4453 if (rec_ptr)
4454 lpfc_read_fcoe_param(phba, rec_ptr);
4455
4456 /* Read FCF connection table */
4457 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4458 size - offset, FCOE_CONN_TBL_TYPE);
4459 if (rec_ptr)
4460 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4461
4462}
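/*
 * Illustrative sketch only: the config region 23 layout implied by
 * lpfc_parse_fcoe_conf() and lpfc_get_rec_conf23() above. Offsets are in
 * bytes; the concrete signature and version values come from
 * LPFC_REGION23_SIGNATURE and LPFC_REGION23_VERSION.
 *
 *   bytes 0-3   region signature
 *   bytes 4-7   region version (checked in byte 4)
 *   bytes 8..   TLV records: one header word per record
 *               (byte 0 = record type, byte 1 = length in words),
 *               followed by rec_length data words; the list ends with a
 *               record whose type is LPFC_REGION23_LAST_REC.
 */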
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b..02aa016b93e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,9 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1155 1187
1156#define JEDEC_ID_ADDRESS 0x0080001c 1188#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1189#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1374#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1375#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1376#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1377
1348#define MBX_WRITE_WWN 0x98 1378#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1379#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1380#define MBX_LOAD_EXP_ROM 0x9C
1351 1381#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1382#define MBX_SLI4_REQ_FTRS 0x9D
1383#define MBX_MAX_CMDS 0x9E
1384#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1385#define MBX_SLI2_CMD_MASK 0x80
1386#define MBX_REG_VFI 0x9F
1387#define MBX_REG_FCFI 0xA0
1388#define MBX_UNREG_VFI 0xA1
1389#define MBX_UNREG_FCFI 0xA2
1390#define MBX_INIT_VFI 0xA3
1391#define MBX_INIT_VPI 0xA4
1354 1392
1355/* IOCB Commands */ 1393/* IOCB Commands */
1356 1394
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1478#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1479#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1480
1481/* Unhandled Data Security SLI Commands */
1482#define DSSCMD_IWRITE64_CR 0xD8
1483#define DSSCMD_IWRITE64_CX 0xD9
1484#define DSSCMD_IREAD64_CR 0xDA
1485#define DSSCMD_IREAD64_CX 0xDB
1486#define DSSCMD_INVALIDATE_DEK 0xDC
1487#define DSSCMD_SET_KEK 0xDD
1488#define DSSCMD_GET_KEK_ID 0xDE
1489#define DSSCMD_GEN_XFER 0xDF
1490
1443#define CMD_MAX_IOCB_CMD 0xE6 1491#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1492#define CMD_IOCB_MASK 0xff
1445 1493
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1514#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1515#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1516#define MBXERR_ERROR 16
1517#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1518#define MBX_NOT_FINISHED 255
1470 1519
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1520#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
1504#endif 1553#endif
1505}; 1554};
1506 1555
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1556typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1557#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1558 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
2287 uint32_t rsvd3; 2310 uint32_t rsvd3;
2288 uint32_t rsvd4; 2311 uint32_t rsvd4;
2289 uint32_t rsvd5; 2312 uint32_t rsvd5;
2290 uint16_t rsvd6; 2313 uint16_t vfi;
2291 uint16_t vpi; 2314 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2315#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2316 uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
2297 uint32_t rsvd4; 2320 uint32_t rsvd4;
2298 uint32_t rsvd5; 2321 uint32_t rsvd5;
2299 uint16_t vpi; 2322 uint16_t vpi;
2300 uint16_t rsvd6; 2323 uint16_t vfi;
2301#endif 2324#endif
2302} REG_VPI_VAR; 2325} REG_VPI_VAR;
2303 2326
@@ -2457,7 +2480,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2480 uint32_t entry_index:16;
2458#endif 2481#endif
2459 2482
2460 uint32_t rsvd1; 2483 uint32_t sli4_length;
2461 uint32_t word_cnt; 2484 uint32_t word_cnt;
2462 uint32_t resp_offset; 2485 uint32_t resp_offset;
2463} DUMP_VAR; 2486} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2493#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2494#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2495
2496#define DMP_REGION_VPORT 0x16 /* VPort info region */
2497#define DMP_VPORT_REGION_SIZE 0x200
2498#define DMP_MBOX_OFFSET_WORD 0x5
2499
2500#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2501#define DMP_FCOEPARAM_RGN_SIZE 0x400
2502
2473#define WAKE_UP_PARMS_REGION_ID 4 2503#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2504#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2505
2506struct vport_rec {
2507 uint8_t wwpn[8];
2508 uint8_t wwnn[8];
2509};
2510
2511#define VPORT_INFO_SIG 0x32324752
2512#define VPORT_INFO_REV_MASK 0xff
2513#define VPORT_INFO_REV 0x1
2514#define MAX_STATIC_VPORT_COUNT 16
2515struct static_vport_info {
2516 uint32_t signature;
2517 uint32_t rev;
2518 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2519 uint32_t resvd[66];
2520};
2521
2476/* Option rom version structure */ 2522/* Option rom version structure */
2477struct prog_id { 2523struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2524#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
2697#endif 2743#endif
2698 2744
2699#ifdef __BIG_ENDIAN_BITFIELD 2745#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2746 uint32_t rsvd1 : 19; /* Reserved */
2747 uint32_t cdss : 1; /* Configure Data Security SLI */
2748 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2749 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2750 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2751 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2765 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2766 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2767 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2768 uint32_t rsvd2 : 3; /* Reserved */
2769 uint32_t cdss : 1; /* Configure Data Security SLI */
2770 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2771#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2772#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2773 uint32_t rsvd3 : 19; /* Reserved */
2774 uint32_t gdss : 1; /* Configure Data Security SLI */
2775 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2776 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2777 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2778 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2792 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2793 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2794 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2795 uint32_t rsvd4 : 3; /* Reserved */
2796 uint32_t gdss : 1; /* Configure Data Security SLI */
2797 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2798#endif
2745 2799
2746#ifdef __BIG_ENDIAN_BITFIELD 2800#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
2753 2807
2754#ifdef __BIG_ENDIAN_BITFIELD 2808#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2809 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2756 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2810 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2757#else /* __LITTLE_ENDIAN */ 2811#else /* __LITTLE_ENDIAN */
2758 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2813 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2814#endif
2761 2815
2762 uint32_t rsvd4; /* Reserved */ 2816 uint32_t rsvd6; /* Reserved */
2763 2817
2764#ifdef __BIG_ENDIAN_BITFIELD 2818#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2819 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2820 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2821#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2822 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2823 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2824#endif
2771 2825
2772} CONFIG_PORT_VAR; 2826} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3720#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3721#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3722#define SETVAR_MLORST 0x103007
3723
3724#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 00000000000..39c34b3ad29
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * EG. For a bit field that is in the 7th bit of the "field4" field of a
24 * structure and is 2 bits in size the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * EG. To get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
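/*
 * Illustrative sketch only: how the bf_get() accessor defined earlier in
 * this file would be used to test an EQE's valid bit.
 * lpfc_example_eqe_is_valid() is a hypothetical name used for illustration.
 */
static inline int
lpfc_example_eqe_is_valid(struct lpfc_eqe *eqe)
{
	/* bit 0 of word0, per the lpfc_eqe_valid_* defines above */
	return bf_get(lpfc_eqe_valid, eqe);
}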
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK 0x0000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
371struct lpfc_wqe_generic{
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
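/*
 * Illustrative sketch only: pulling the completion status out of the
 * response half of the header defined above, using the bf_get() accessor
 * macros defined earlier in this file. lpfc_example_shdr_status() is a
 * hypothetical name used for illustration.
 */
static inline uint32_t
lpfc_example_shdr_status(union lpfc_sli4_cfg_shdr *shdr)
{
	/* word7 bits 7:0, per the lpfc_mbox_hdr_status_* defines above */
	return bf_get(lpfc_mbox_hdr_status, &shdr->response);
}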
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
840
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
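
/*
 * Illustrative sketch, not part of the original header: the LPFC_RQ_RING_SIZE
 * values above encode the receive-queue depth as log2(number of entries), so
 * 9 selects 512 entries, 10 selects 1024, and so on.  A hypothetical helper
 * mapping a supported depth to its encoding could look like this:
 */
static inline uint32_t
lpfc_example_rq_size_encoding(uint32_t entries)
{
	switch (entries) {
	case 512:
		return LPFC_RQ_RING_SIZE_512;
	case 1024:
		return LPFC_RQ_RING_SIZE_1024;
	case 2048:
		return LPFC_RQ_RING_SIZE_2048;
	case 4096:
	default:
		/* Fall back to the largest ring for any unsupported depth */
		return LPFC_RQ_RING_SIZE_4096;
	}
}
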
983
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets
1096 this flag !! */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
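
/*
 * Illustrative sketch, not part of the original header: the comment on
 * lpfc_sli4_sge_last above means the final SGE of a scatter/gather list must
 * carry this flag so the port knows where the list ends.  Assuming the
 * bf_set() accessor macro declared near the top of this header, terminating
 * an SGL might look like this (the helper name is hypothetical):
 */
static inline void
lpfc_example_terminate_sgl(struct sli4_sge *sgl, uint32_t num_sge)
{
	/* Flag only the last entry; earlier entries leave the bit clear */
	bf_set(lpfc_sli4_sge_last, &sgl[num_sge - 1], 1);
}
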
1104
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
1185
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
1374#define lpfc_mbx_read_vpi_vv_MASK 0x00000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsv;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
1710
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CQE_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734 struct lpfc_mbx_reg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
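
/*
 * Illustrative sketch, not part of the original header: the trailer word
 * above distinguishes ordinary mailbox completions from asynchronous event
 * entries, whose code field then selects the ACQE layout that follows (link,
 * FCoE or DCBX).  Assuming the bf_get() accessor macro declared near the top
 * of this header, a hypothetical classification helper could look like this:
 */
static inline int
lpfc_example_mcqe_is_async(struct lpfc_mcqe *mcqe)
{
	/* Only a valid entry carries meaningful trailer bits */
	return bf_get(lpfc_trailer_valid, mcqe) &&
	       bf_get(lpfc_trailer_async, mcqe);
}
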
1799
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
1864 * mailbox commands between the host and port. The mailbox consists
1865 * of a 256-byte payload area followed by a 16-byte completion queue
1866 * entry.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
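
/*
 * Illustrative sketch, not part of the original header: struct lpfc_mqe is
 * one command/status word plus LPFC_SLI4_MB_WORD_COUNT - 1 payload words,
 * which matches the 256-byte payload area described above if the word count
 * is 64, and struct lpfc_mcqe is four words, i.e. 16 bytes.  Assuming
 * BUILD_BUG_ON() from <linux/kernel.h>, those expectations could be checked
 * at compile time (the helper name is hypothetical):
 */
static inline void
lpfc_example_check_bmbx_layout(void)
{
	BUILD_BUG_ON(sizeof(struct lpfc_mqe) !=
		     LPFC_SLI4_MB_WORD_COUNT * sizeof(uint32_t));
	BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 4 * sizeof(uint32_t));
}
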
1872
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
1876#define NO_XRI ((uint16_t)-1)
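
/*
 * Illustrative sketch, not part of the original header: the comment above
 * about aligning an SGL address refers to rounding up to the SGL_ALIGN_SZ
 * boundary; no macro for it appears at this point in the file, so the
 * computation below is only an assumption of what such a helper would do
 * (the name is hypothetical):
 */
static inline uint64_t
lpfc_example_sgl_align_up(uint64_t addr)
{
	/* Round the address up to the next SGL_ALIGN_SZ (64-byte) boundary */
	return (addr + SGL_ALIGN_SZ - 1) & ~((uint64_t)SGL_ALIGN_SZ - 1);
}
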
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x00000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x00000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x00000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t payload_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wge_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 different REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t payload_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wge_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x00000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
2141
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d..2f5907f92ee 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return a zero when finished or we got a
225 * mailbox error, either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 483 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 484 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 487 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 488 return -EIO;
466 } 489 }
467 } 490 }
468 491
492 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 493 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 495
472 /* Enable appropriate host interrupts */ 496 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 497 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 499 if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 594{
572 struct lpfc_vport **vports; 595 struct lpfc_vport **vports;
573 int i; 596 int i;
574 /* Disable interrupts */ 597
575 writel(0, phba->HCregaddr); 598 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
577 603
578 if (phba->pport->load_flag & FC_UNLOADING) 604 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 605 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 606 else {
581 vports = lpfc_create_vport_work_array(phba); 607 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 608 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 611 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 612 lpfc_destroy_vport_work_array(phba, vports);
586 } 613 }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 615}
589 616
590/** 617/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 619 * @phba: pointer to lpfc HBA data structure.
593 * 620 *
594 * This routine will do uninitialization after the HBA is reset when bring 621 * This routine will do uninitialization after the HBA is reset when bring
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 625 * 0 - success.
599 * Any other value - error. 626 * Any other value - error.
600 **/ 627 **/
601int 628static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 630{
604 struct lpfc_sli *psli = &phba->sli; 631 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 632 struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 669
643 return 0; 670 return 0;
644} 671}
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
676 * This routine will do uninitialization after the HBA is reset when bringing
677 * down the SLI Layer.
678 *
679 * Return codes
680 * 0 - success.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
700                                                   /* scsi_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
732 * uninitialization after the HBA is reset when bringing down the SLI Layer.
733 *
734 * Return codes
735 * 0 - success.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
645 743
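The lpfc_hba_down_post() wrapper above dispatches through a per-HBA function pointer that the new lpfc_init_api_table_setup() (later in this patch) fills in with either the SLI-3 or the SLI-4 implementation, so callers never branch on the SLI revision themselves. A minimal, standalone sketch of that dispatch pattern, using hypothetical names rather than the real lpfc structures:

#include <stdio.h>

struct hba;
typedef int (*down_post_fn)(struct hba *);

struct hba {
	int sli_rev;              /* 3 or 4 */
	down_post_fn down_post;   /* filled in once at probe time */
};

static int down_post_s3(struct hba *h)
{
	printf("SLI-%d post-reset cleanup (common part)\n", h->sli_rev);
	return 0;
}

static int down_post_s4(struct hba *h)
{
	int ret = down_post_s3(h);   /* SLI-4 path reuses the SLI-3 work, as in the patch */
	if (ret)
		return ret;
	printf("SLI-4 extra cleanup (splice aborted buffers back)\n");
	return 0;
}

/* analogue of lpfc_init_api_table_setup(): choose the implementation once */
static void api_table_setup(struct hba *h)
{
	h->down_post = (h->sli_rev == 4) ? down_post_s4 : down_post_s3;
}

/* analogue of the lpfc_hba_down_post() wrapper: callers never branch on sli_rev */
static int hba_down_post(struct hba *h)
{
	return (*h->down_post)(h);
}

int main(void)
{
	struct hba h = { .sli_rev = 4 };
	api_table_setup(&h);
	return hba_down_post(&h);
}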
646/** 744/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 907 "taking this port offline.\n");
810 908
811 spin_lock_irq(&phba->hbalock); 909 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 911 spin_unlock_irq(&phba->hbalock);
814 912
815 lpfc_offline_prep(phba); 913 lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 932 struct lpfc_sli *psli = &phba->sli;
835 933
836 spin_lock_irq(&phba->hbalock); 934 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 936 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 937 lpfc_offline_prep(phba);
840 938
841 lpfc_offline(phba); 939 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 944 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 945 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 946 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 949}
850 950
851/** 951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
970/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 972 * @phba: pointer to lpfc hba data structure.
854 * 973 *
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 983 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 984 struct lpfc_sli *psli = &phba->sli;
866 985
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 997 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 998 "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1000 phba->work_status[0], phba->work_status[1]);
872 1001
873 spin_lock_irq(&phba->hbalock); 1002 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1004 spin_unlock_irq(&phba->hbalock);
876 1005
877 1006
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1039 phba->work_hs = old_host_status & ~HS_FFER1;
911 1040
1041 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1046}
916 1047
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
917/** 1063/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1065 * @phba: pointer to lpfc hba data structure.
920 * 1066 *
921 * This routine is invoked to handle the following HBA hardware error 1067 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1070 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1071 * 3 - Mailbox command came back as unknown
926 **/ 1072 **/
927void 1073static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1075{
930 struct lpfc_vport *vport = phba->pport; 1076 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1077 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1080 unsigned long temperature;
935 struct temp_event temp_event_data; 1081 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1082 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1083
939 /* If the pci channel is offline, ignore possible errors, 1084 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1085 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
942 return; 1091 return;
1092 }
1093
943 /* If resets are disabled then leave the HBA alone and return */ 1094 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1095 if (!phba->cfg_enable_hba_reset)
945 return; 1096 return;
946 1097
947 /* Send an internal error event to mgmt application */ 1098 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1099 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1100
956 if (phba->hba_flag & DEFER_ERATT) 1101 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1102 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1110 phba->work_status[0], phba->work_status[1]);
966 1111
967 spin_lock_irq(&phba->hbalock); 1112 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1114 spin_unlock_irq(&phba->hbalock);
970 1115
971 /* 1116 /*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1182}
1038 1183
1039/** 1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
1211 * specified yet; just treat it as an adapter hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1231 * routine from the API jump table function pointer from the lpfc_hba struct.
1232 *
1233 * Return codes
1234 * 0 - success.
1235 * Any other value - error.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
1243/**
1040 * lpfc_handle_latt - The HBA link event handler 1244 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1245 * @phba: pointer to lpfc hba data structure.
1042 * 1246 *
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1341 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1342 * 1 - success
1139 **/ 1343 **/
1140static int 1344int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1346{
1143 uint8_t lenlo, lenhi; 1347 uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1496 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1497 int max_speed;
1294 int GE = 0; 1498 int GE = 0;
1499 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1500 struct {
1296 char * name; 1501 char * name;
1297 int max_speed; 1502 int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1642 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1644 break;
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1440 default: 1653 default:
1441 m = (typeof(m)){ NULL }; 1654 m = (typeof(m)){ NULL };
1442 break; 1655 break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1657
1445 if (mdp && mdp[0] == '\0') 1658 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1659 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1660 /* oneConnect HBAs require special processing; they are all initiators
1448 snprintf(descp, 255, 1661 * and we put the port number on the end
1449 "Emulex %s %d%s %s %s", 1662 */
1450 m.name, m.max_speed, 1663 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1664 if (oneConnect)
1452 m.bus, 1665 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
1454} 1678}
1455 1679
1456/** 1680/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1758 icmd->ulpLe = 1;
1535 1759
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1763 kfree(mp1);
1539 cnt++; 1764 cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Let's wait for this to happen, if needed. 1986 * Let's wait for this to happen, if needed.
1762 */ 1987 */
1763 while (!list_empty(&vport->fc_nodes)) { 1988 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1989 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1991 "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2006 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2007 msleep(10);
1784 } 2008 }
1785 return;
1786} 2009}
1787 2010
1788/** 2011/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2026}
1804 2027
1805/** 2028/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2030 * @phba: pointer to lpfc hba data structure.
1808 * 2031 *
1809 * This routine stops all the timers associated with a HBA. This function is 2032 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2033 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2034 **/
1812static void 2035void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2037{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2038 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2039 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2040 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
2051 /* Stop any OneConnect device specific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
1822 return; 2059 return;
1823} 2060}
1824 2061
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2115 return 1;
1879 } 2116 }
1880 2117
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2118 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
1884 } 2128 }
1885 2129
1886 vports = lpfc_create_vport_work_array(phba); 2130 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2131 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2133 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2134 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2135 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2191 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2192 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2193 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2195 struct Scsi_Host *shost;
1952 2196
1953 if (vports[i]->load_flag & FC_UNLOADING) 2197 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2198 continue;
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2200 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2201 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2202 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2220 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2221 lpfc_destroy_vport_work_array(phba, vports);
1977 2222
1978 lpfc_sli_flush_mbox_queue(phba); 2223 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2224}
1980 2225
1981/** 2226/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2242 return;
1998 2243
1999 /* stop all timers associated with this hba */ 2244 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2245 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2246 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2247 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2249 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2250 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2258 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2259 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2260 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2262 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2263 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2264 vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2351 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2352 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2353 shost->max_cmd_len = 16;
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
2109 2358
2110 /* 2359 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2360 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2372
2124 /* Initialize all internally managed lists. */ 2373 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2376 spin_lock_init(&vport->work_port_lock);
2127 2377
2128 init_timer(&vport->fc_disctmo); 2378 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2564}
2315 2565
2316/** 2566/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine is invoked to stop an SLI3 device port; it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
2573 **/
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
2576{
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2583
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
2588
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port; it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
2607
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2613 * the API jump table function pointer from the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
2620
2621/**
2622 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2637
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2642 return;
2643 }
2644
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2650 /*
2651 * In phase 1, there is a single FCF index, 0. In phase2, the driver
2652 * supports multiple FCF indices.
2653 */
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
2658
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 }
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
2675 }
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2678}
2679
2680/**
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2684 *
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
2690 **/
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2694{
2695 uint16_t latt_fault;
2696
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
2711}
2712
2713/**
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2717 *
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2720 *
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
2726{
2727 uint8_t att_type;
2728
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
2747 }
2748 return att_type;
2749}
2750
2751/**
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2755 *
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
2764{
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835 /* Block ELS IOCBs until we have done process link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855 /* Fake the following irrelevant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 return;
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_link: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected from network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
2942 * Currently, the driver supports only one FCF, so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
2958/**
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
2961 * @acqe_link: pointer to the async dcbx completion queue entry.
2962 *
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2964 **/
2965static void
2966lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
2968{
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2971 "handled yet\n");
2972}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchronous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
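lpfc_sli4_async_event_proc() above follows the usual deferred-work pattern: clear the pending flag under the lock, then repeatedly remove one entry from the head of the queue while holding the lock and process it with the lock dropped. A minimal userspace sketch of that "dequeue under lock, handle unlocked" consumer loop, using a pthread mutex in place of hbalock and a plain singly linked list in place of the kernel list helpers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int code;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *q_head;

/* producer side: post one event onto the queue */
static void post_event(int code)
{
	struct event *ev = malloc(sizeof(*ev));
	if (!ev)
		return;
	ev->code = code;
	pthread_mutex_lock(&q_lock);
	ev->next = q_head;              /* LIFO for brevity */
	q_head = ev;
	pthread_mutex_unlock(&q_lock);
}

/* consumer side: take one entry under the lock, handle it with the lock dropped */
static void process_events(void)
{
	for (;;) {
		struct event *ev;

		pthread_mutex_lock(&q_lock);
		ev = q_head;
		if (ev)
			q_head = ev->next;
		pthread_mutex_unlock(&q_lock);
		if (!ev)
			break;
		printf("handling event %d\n", ev->code);   /* lock not held here */
		free(ev);
	}
}

int main(void)
{
	post_event(1);
	post_event(2);
	process_events();
	return 0;
}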
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
3062}
3063
3064/**
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine is invoked to log the currently used active interrupt mode
3070 * to the device.
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
3095/**
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3099 * This routine is invoked to enable the PCI device that is common to all
3100 * PCI devices.
3101 *
3102 * Return codes
3103 * 0 - successful
3104 * other values - error
3105 **/
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108{
3109 struct pci_dev *pdev;
3110 int bars;
3111
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
3129
3130 return 0;
3131
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
3136}
3137
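lpfc_enable_pci_dev() above, and lpfc_sli4_driver_resource_setup() further down, both unwind partial initialization with the standard kernel goto-label idiom: each failing step jumps to the label that releases everything acquired so far, with the labels running in reverse acquisition order. A minimal standalone sketch of the idiom, with hypothetical resources rather than the real PCI calls:

#include <stdio.h>
#include <stdlib.h>

static int setup_all(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto out_error;
	b = malloc(32);
	if (!b)
		goto out_free_a;
	/* ... further steps would each get their own unwind label ... */
	printf("setup complete: %p %p\n", (void *)a, (void *)b);
	return 0;              /* resources are kept on success, as in a real setup path */

	/* unwind labels run in reverse acquisition order */
out_free_a:
	free(a);
out_error:
	return -1;
}

int main(void)
{
	return setup_all();
}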
3138/**
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3142 * This routine is invoked to disable the PCI device that is common to all
3143 * PCI devices.
3144 **/
3145static void
3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147{
3148 struct pci_dev *pdev;
3149 int bars;
3150
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3163
3164 return;
3165}
3166
3167/**
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
3170 *
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3174 * outstanding mailbox commands.
3175 **/
3176void
3177lpfc_reset_hba(struct lpfc_hba *phba)
3178{
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
3182 return;
3183 }
3184 lpfc_offline_prep(phba);
3185 lpfc_offline(phba);
3186 lpfc_sli_brdrestart(phba);
3187 lpfc_online(phba);
3188 lpfc_unblock_mgmt_io(phba);
3189}
3190
3191/**
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3194 *
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it attached to.
3197 *
3198 * Return codes
3199 * 0 - successful
3200 * other values - error
3201 **/
3202static int
3203lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204{
3205 struct lpfc_sli *psli;
3206
3207 /*
3208 * Initialize timers used by driver
3209 */
3210
3211 /* Heartbeat timer */
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3215
3216 psli = &phba->sli;
3217 /* MBOX heartbeat timer */
3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 psli->mbox_tmo.data = (unsigned long) phba;
3221 /* FCP polling mode timer */
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3225 /* Fabric block timer */
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3229 /* EA polling mode timer */
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
3233
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3237
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
3240 /*
3241 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
3244 */
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3253 }
3254
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3258
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
3262
3263 /*
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3265 */
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
3268
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 return -ENOMEM;
3272
3273 return 0;
3274}
3275
3276/**
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific for supporting the SLI-3 HBA device it attached to.
3282 **/
3283static void
3284lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285{
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3288
3289 return;
3290}
3291
3292/**
3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294 * @phba: pointer to lpfc hba data structure.
3295 *
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it attached to.
3298 *
3299 * Return codes
3300 * 0 - successful
3301 * other values - error
3302 **/
3303static int
3304lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3305{
3306 struct lpfc_sli *psli;
3307 int rc;
3308 int i, hbq_count;
3309
3310 /* Before proceed, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3312 if (rc)
3313 return -ENODEV;
3314
3315 /*
3316 * Initialize timers used by driver
3317 */
3318
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3323
3324 psli = &phba->sli;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3337 /*
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341 * All of the resources allocated
3342 * for this Port are tied to these values.
3343 */
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3349
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3355
3356 /*
3357 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3364 * 1k 50 52
3365 * 2k 114 116
3366 * 4k 242 244
3367 * 8k 498 500
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3379 else
3380 phba->cfg_sg_seg_cnt = 498;
3381
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3386
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3394
3395 /*
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 */
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list is used by the worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3403
3404 /*
3405 * Initialize dirver internal slow-path work queues
3406 */
3407
3408 /* Driver internal slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3424
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 if (rc)
3428 return -ENOMEM;
3429
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3432 if (unlikely(rc))
3433 goto out_free_mem;
3434
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3437 if (unlikely(rc))
3438 goto out_free_bsmbx;
3439
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3442 if (unlikely(rc))
3443 goto out_free_bsmbx;
3444
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3447 if (unlikely(rc))
3448 goto out_free_bsmbx;
3449
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3452 if (rc)
3453 goto out_free_bsmbx;
3454
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3457 if (rc)
3458 goto out_destroy_queue;
3459
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3462 if (rc) {
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3466 }
3467 rc = lpfc_init_active_sgl_array(phba);
3468 if (rc) {
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3472 }
3473
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3475 if (rc) {
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
3479 }
3480
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3488 }
3489
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
3497 }
3498
3499 return rc;
3500
3501out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3507out_free_sgl_list:
3508 lpfc_free_sgl_list(phba);
3509out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3511out_destroy_queue:
3512 lpfc_sli4_queue_destroy(phba);
3513out_free_bsmbx:
3514 lpfc_destroy_bootstrap_mbox(phba);
3515out_free_mem:
3516 lpfc_mem_free(phba);
3517 return rc;
3518}
3519
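The sgl-size table in the comment inside lpfc_sli4_driver_resource_setup() above can be checked directly: assuming cmd = 32 bytes, rsp = 160 bytes, and a 16-byte sli4_sge (the sizes the comment implies), the rounded segment counts land exactly on the 1k/2k/4k/8k pool sizes. A small standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* sizes taken from the comment: cmd(32) + rsp(160) + N * sizeof(sli4_sge) */
	const unsigned cmd = 32, rsp = 160, sge = 16;
	const unsigned seg_cnt[] = { 50, 114, 242, 498 };

	for (unsigned i = 0; i < 4; i++) {
		/* +2 segments account for the command and response entries */
		unsigned total = cmd + rsp + (seg_cnt[i] + 2) * sge;
		printf("sg_seg_cnt %3u -> sg_dma_buf_size %u\n", seg_cnt[i], total);
	}
	return 0;   /* prints 1024, 2048, 4096, 8192 */
}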
3520/**
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3523 *
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific for supporting the SLI-4 HBA device it attached to.
3526 **/
3527static void
3528lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3529{
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3531
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3534
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3537
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3540
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3543
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3550
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3556
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3563
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3566
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3569
3570 /* Free the current connect table */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3573 kfree(conn_entry);
3574
3575 return;
3576}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api function jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
3611
3612/**
3613 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3615 *
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it is attached to.
3618 *
3619 * Return codes
3620 * 0 - successful
3621 * other values - error
3622 **/
3623static int
3624lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625{
3626 /*
3627 * Driver resources common to all SLI revisions
3628 */
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3631
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3634
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3641
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 return 0;
3656}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it is attached to.
3664 *
3665 * Return codes
3666 * 0 - successful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3680 }
3681
3682 return 0;
3683}
3684
3685/**
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3688 *
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it is
3691 * attached to.
3692 **/
3693static void
3694lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3695{
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3698}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
3717 }
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initialize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - successful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748 __func__, i, LPFC_IOCB_LIST_CNT);
3749 goto out_free_iocbq;
3750 }
3751
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3754 kfree(iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
3758 }
3759 iocbq_entry->sli4_xritag = NO_XRI;
3760
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
3765 }
3766
3767 return 0;
3768
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
3771
3772 return -ENOMEM;
3773}
3774
3775/**
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3778 *
3779 * This routine is invoked to free the driver's sgl list and memory.
3780 **/
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3783{
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
3787
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3791
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3803 }
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
3806
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3826}
3827
3828/**
3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3830 * @phba: pointer to lpfc hba data structure.
3831 *
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a placeholder for now.
3835 **/
3836static void
3837lpfc_free_active_sgl(struct lpfc_hba *phba)
3838{
3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3840}
3841
3842/**
3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3844 * @phba: pointer to lpfc hba data structure.
3845 *
3846 * This routine is invoked to allocate and initialize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3848 *
3849 * Return codes
3850 * 0 - successful
3851 * other values - error
3852 **/
3853static int
3854lpfc_init_sgl_list(struct lpfc_hba *phba)
3855{
3856 struct lpfc_sglq *sglq_entry = NULL;
3857 int i;
3858 int els_xri_cnt;
3859
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3863 els_xri_cnt);
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3867
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3874 els_xri_cnt);
3875 return -ENOMEM;
3876 }
3877
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3881 GFP_KERNEL);
3882
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3887 els_xri_cnt);
3888 return -ENOMEM;
3889 }
3890
3891 /* Keep the SCSI XRI into the XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3895
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3899
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3906 return -ENOMEM;
3907 }
3908
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3915 goto out_free_mem;
3916 }
3917
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3920 kfree(sglq_entry);
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3923 goto out_free_mem;
3924 }
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3928 kfree(sglq_entry);
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3935
3936 /* The list order is used by later block SGL registration */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
3942 }
3943 return 0;
3944
3945out_free_mem:
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3948 return -ENOMEM;
3949}
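/*
 * Editor's note (illustrative sketch, not part of the lpfc source):
 * lpfc_init_sgl_list() above partitions the port's XRI resources between
 * ELS and SCSI use.  With hypothetical READ_CONFIG values of max_xri = 1024
 * and an ELS IOCB count of 64, the split works out as
 *
 *     scsi_xri_max = max_xri - els_xri_cnt = 1024 - 64 = 960
 *
 * i.e. 64 sglq entries are pre-allocated for ELS traffic and up to 960
 * XRIs remain available for SCSI buffer allocation.
 */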
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE / 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
3964 * 0 - successful
3965 * ENOMEM - No available memory
3966 * EIO - The mailbox failed to complete successfully.
3967 **/
3968int
3969lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3970{
3971 int rc = 0;
3972 int longs;
3973 uint16_t rpi_count;
3974 struct lpfc_rpi_hdr *rpi_hdr;
3975
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3977
3978 /*
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
3981 */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3984
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3987 GFP_KERNEL);
3988 if (!phba->sli4_hba.rpi_bmask)
3989 return -ENOMEM;
3990
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3992 if (!rpi_hdr) {
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
3996 rc = -ENODEV;
3997 }
3998
3999 return rc;
4000}
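/*
 * Editor's note (illustrative sketch, not part of the lpfc source): the
 * rpi_bmask sizing above is the usual round-up division for packing N bits
 * into an array of unsigned long.  Assuming a 64-bit host (BITS_PER_LONG
 * == 64) and a hypothetical rpi_count of 100:
 *
 *     longs = (100 + 64 - 1) / 64 = 2
 *
 * so two unsigned longs (128 bits) are allocated, enough to track 100 rpis.
 */
#if 0	/* illustrative only -- mirrors the sizing arithmetic above */
static inline size_t rpi_bmask_longs(unsigned int rpi_count)
{
	return (rpi_count + BITS_PER_LONG - 1) / BITS_PER_LONG;
}
#endif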
4001
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
4015struct lpfc_rpi_hdr *
4016lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4017{
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4021
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4024
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4028
4029 /*
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4033 */
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4035 return NULL;
4036
4037 /*
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4040 */
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4042 if (!dmabuf)
4043 return NULL;
4044
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4047 &dmabuf->phys,
4048 GFP_KERNEL);
4049 if (!dmabuf->virt) {
4050 rpi_hdr = NULL;
4051 goto err_free_dmabuf;
4052 }
4053
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4056 rpi_hdr = NULL;
4057 goto err_free_coherent;
4058 }
4059
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4062 if (!rpi_hdr)
4063 goto err_free_coherent;
4064
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4071
4072 /*
4073 * The next_rpi stores the next module-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4075 */
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
4078 return rpi_hdr;
4079
4080 err_free_coherent:
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4083 err_free_dmabuf:
4084 kfree(dmabuf);
4085 return NULL;
4086}
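/*
 * Editor's note (illustrative sketch, not part of the lpfc source): each
 * successful call to lpfc_sli4_create_rpi_hdr() above covers one block of
 * LPFC_RPI_HDR_COUNT (64, per the header comment) rpis, so next_rpi only
 * ever advances on 64-rpi boundaries.  Assuming a hypothetical rpi_base of
 * 0 and max_rpi of 192 (rpi_limit = 191), successive calls would start at
 * rpi 0, 64 and 128; a fourth call would exceed rpi_limit and return NULL,
 * as checked at the top of the routine.
 */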
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4090 * @phba: pointer to lpfc hba data structure.
4091 *
4092 * This routine is invoked to remove all memory resources allocated
4093 * to support rpis. This routine presumes the caller has released all
4094 * rpis consumed by fabric or port logins and is prepared to have
4095 * the header pages removed.
4096 **/
4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
4124 * pointer to @phba - successful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 /* phba is still NULL here; log through the PCI device instead */
4136 dev_err(&pdev->dev, "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
4178 * 0 - successful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4202
4203 return 0;
4204}
4205
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244 while (pagecnt) {
4245 spin_lock_init(&_dump_buf_lock);
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
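/*
 * Editor's note (illustrative sketch, not part of the lpfc source): the
 * _dump_buf_data/_dump_buf_dif allocations above ask __get_free_pages()
 * for a high-order block and fall back to smaller orders on failure.  The
 * pagecnt variable is the allocation order, so a successful order-10
 * allocation yields 1 << 10 = 1024 contiguous pages (4 MB assuming 4 KB
 * pages); if that fails, the loop retries with order 9 (512 pages) and so
 * on down to order 1 before giving up.
 */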
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
4341 * 0 - successful
4342 * other values - error
4343 **/
4344static int
4345lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4346{
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4349 int i, hbq_count;
4350 void *ptr;
4351 int error = -ENODEV;
4352
4353 /* Obtain PCI device reference */
4354 if (!phba->pcidev)
4355 return error;
4356 else
4357 pdev = phba->pcidev;
4358
4359 /* Set the device DMA mask size */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4362 return error;
4363
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4366 */
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4369
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4372
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4378 goto out;
4379 }
4380
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4387 }
4388
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4391 SLI2_SLIM_SIZE,
4392 &phba->slim2p.phys,
4393 GFP_KERNEL);
4394 if (!phba->slim2p.virt)
4395 goto out_iounmap;
4396
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4402
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4406 GFP_KERNEL);
4407 if (!phba->hbqslimp.virt)
4408 goto out_free_slim;
4409
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4417 }
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4420
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4422
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
4424
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4430
4431 return 0;
4432
4433out_free_slim:
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4436out_iounmap:
4437 iounmap(phba->ctrl_regs_memmap_p);
4438out_iounmap_slim:
4439 iounmap(phba->slim_memmap_p);
4440out:
4441 return error;
4442}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4478 *
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
4481 *
4482 * Return 0 if successful, otherwise -ENODEV.
4483 **/
4484int
4485lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4486{
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
4490
4491 if (!phba->sli4_hba.STAregaddr)
4492 return -ENODEV;
4493
4494 /* With an unrecoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4507 }
4508 return -ENODEV;
4509 }
4510
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4517 break;
4518 }
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4521 port_error = 0;
4522 break;
4523 }
4524 msleep(10);
4525 }
4526
4527 if (port_error)
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4539
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4549
4550 return port_error;
4551}
4552
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
4574
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
4585
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
4595}
4596
4597/**
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4601 *
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given virtual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
4606 **/
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4609{
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
4612
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
4624}
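/*
 * Editor's note (illustrative sketch, not part of the lpfc source): BAR2
 * exposes one doorbell page per virtual function, so every register
 * address above is "doorbell base + vf * LPFC_VFR_PAGE_SIZE + register
 * offset".  Assuming a hypothetical 4 KB VF page and a WQ doorbell offset
 * of 0x40, the WQ doorbell for vf 2 would sit at drbl_regs_memmap_p +
 * 2 * 0x1000 + 0x40 = drbl_regs_memmap_p + 0x2040.
 */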
4625
4626/**
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4629 *
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
4636 *
4637 * Return codes
4638 * 0 - successful
4639 * ENOMEM - could not allocate memory.
4640 **/
4641static int
4642lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4643{
4644 uint32_t bmbx_size;
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4647 uint32_t pa_addr;
4648 uint64_t phys_addr;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
4653
4654 /*
4655 * The bootstrap mailbox region is comprised of 2 parts
4656 * plus an alignment restriction of 16 bytes.
4657 */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 bmbx_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
4666 }
4667 memset(dmabuf->virt, 0, bmbx_size);
4668
4669 /*
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * mailbox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4675 */
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4678
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4683
4684 /*
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx aphys to 64bits so shift instruction compiles
4690 * clean on 32 bit machines.
4691 */
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
4697
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4701 return 0;
4702}
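/*
 * Editor's note (illustrative sketch, not part of the lpfc source):
 * because the aligned bootstrap mailbox address has its low 4 bits clear,
 * the 64-bit physical address can be carried in two 30-bit payloads:
 *
 *     addr_lo payload = bits 33:4  of aphys  (aphys >> 4)
 *     addr_hi payload = bits 63:34 of aphys  (aphys >> 34)
 *
 * Each payload is then shifted left by 2 so the low bits can carry the
 * high/low marker (LPFC_BMBX_BIT1_ADDR_HI / _LO) expected by the port.
 */
#if 0	/* illustrative only -- how a 16-byte aligned address is split */
static inline void bmbx_split_addr(uint64_t aphys,
				   uint32_t *lo30, uint32_t *hi30)
{
	*lo30 = (uint32_t)((aphys >> 4) & 0x3fffffff);	/* bits 33:4  */
	*hi30 = (uint32_t)((aphys >> 34) & 0x3fffffff);	/* bits 63:34 */
}
#endif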
4703
4704/**
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4707 *
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands have been recovered, no
4711 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
4715static void
4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4717{
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4725}
4726
4727/**
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4730 *
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4734 * allocation for the port.
4735 *
4736 * Return codes
4737 * 0 - successful
4738 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4740 **/
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
4743{
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
4747
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
4754 }
4755
4756 lpfc_read_config(phba, pmb);
4757
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4819 }
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
4827}
4828
4829/**
4830 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to set up the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
4837 * 0 - successful
4838 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4840 **/
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
4843{
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4848
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
4856
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
4874}
4875
4876/**
4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
4878 * @phba: pointer to lpfc hba data structure.
4879 *
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as a placeholder.
4884 *
4885 * Return codes
4886 * 0 - successful
4887 * ENOMEM - No available memory
4888 * EIO - The mailbox failed to complete successfully.
4889 **/
4890static int
4891lpfc_sli4_queue_create(struct lpfc_hba *phba)
4892{
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
4897
4898 /*
4899 * Sanity check for configured queue parameters against the run-time
4900 * device parameters
4901 */
4902
4903 /* Sanity check on FCP fast-path WQ parameters */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4908 LPFC_SP_WQN_DEF;
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4913 "FCP WQs (%d)\n",
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4916 goto out_error;
4917 }
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4925 }
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4928
4929 /* Sanity check on FCP fast-path EQ parameters */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4934 LPFC_SP_EQN_DEF;
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4939 "EQs (%d)\n",
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4942 goto out_error;
4943 }
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4951 }
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d), take "
4957 "the number of FCP EQs same as than of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4962 }
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4967
4968 /*
4969 * Create Event Queues (EQs)
4970 */
4971
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4975
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4979 if (!qdesc) {
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4982 goto out_error;
4983 }
4984 phba->sli4_hba.sp_eq = qdesc;
4985
4986 /* Create fast-path FCP Event Queue(s) */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4994 }
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4998 if (!qdesc) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5002 }
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5004 }
5005
5006 /*
5007 * Create Complete Queues (CQs)
5008 */
5009
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5013
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5017 if (!qdesc) {
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5021 }
5022 phba->sli4_hba.mbx_cq = qdesc;
5023
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5027 if (!qdesc) {
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5031 }
5032 phba->sli4_hba.els_cq = qdesc;
5033
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5037 if (!qdesc) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5041 }
5042 phba->sli4_hba.rxq_cq = qdesc;
5043
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5052 }
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5056 if (!qdesc) {
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5061 }
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5063 }
5064
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5068
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5071 if (!qdesc) {
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5075 }
5076 phba->sli4_hba.mbx_wq = qdesc;
5077
5078 /*
5079 * Create all the Work Queues (WQs)
5080 */
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5083
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5087 if (!qdesc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5091 }
5092 phba->sli4_hba.els_wq = qdesc;
5093
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5102 }
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5106 if (!qdesc) {
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5111 }
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5113 }
5114
5115 /*
5116 * Create Receive Queue (RQ)
5117 */
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5120
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5124 if (!qdesc) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5128 }
5129 phba->sli4_hba.hdr_rq = qdesc;
5130
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5138 }
5139 phba->sli4_hba.dat_rq = qdesc;
5140
5141 return 0;
5142
5143out_free_hdr_rq:
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5146out_free_fcp_wq:
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5150 }
5151 kfree(phba->sli4_hba.fcp_wq);
5152out_free_els_wq:
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5155out_free_mbx_wq:
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5158out_free_fcp_cq:
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5162 }
5163 kfree(phba->sli4_hba.fcp_cq);
5164out_free_rxq_cq:
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5167out_free_els_cq:
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5170out_free_mbx_cq:
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5173out_free_fp_eq:
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5177 }
5178 kfree(phba->sli4_hba.fp_eq);
5179out_free_sp_eq:
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5182out_error:
5183 return -ENOMEM;
5184}
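/*
 * Editor's note (illustrative sketch, not part of the lpfc source): the
 * sanity checks at the top of lpfc_sli4_queue_create() clamp the requested
 * fast-path queue counts to what the PCI function actually reports, after
 * reserving the slow-path queues.  With hypothetical values max_wq = 5,
 * LPFC_SP_WQN_DEF = 1 and cfg_fcp_wq_count = 8, the request is reduced to
 * 5 - 1 = 4 FCP WQs; the FCP EQ count is then further capped so it never
 * exceeds the adopted WQ count.
 */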
5185
5186/**
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5191 * operation.
5192 *
5193 * Return codes
5194 * 0 - successful
5195 * ENOMEM - No available memory
5196 * EIO - The mailbox failed to complete successfully.
5197 **/
5198static void
5199lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5200{
5201 int fcp_qidx;
5202
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5206
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5210
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5216
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5222
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5226
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5230
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5234
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5240
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5246
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5250
5251 return;
5252}
5253
5254/**
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5257 *
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5259 * operation.
5260 *
5261 * Return codes
5262 * 0 - successful
5263 * ENOMEM - No available memory
5264 * EIO - The mailbox failed to complete successfully.
5265 **/
5266int
5267lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5268{
5269 int rc = -ENOMEM;
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5272
5273 /*
5274 * Set up Event Queues (EQs)
5275 */
5276
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5281 goto out_error;
5282 }
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5284 LPFC_SP_DEF_IMAX);
5285 if (rc) {
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5288 "rc = 0x%x\n", rc);
5289 goto out_error;
5290 }
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5294
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5302 }
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5305 if (rc) {
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5310 }
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5315 }
5316
5317 /*
5318 * Set up Complete Queues (CQs)
5319 */
5320
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5326 }
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5329 if (rc) {
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5332 "rc = 0x%x\n", rc);
5333 goto out_destroy_fp_eq;
5334 }
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5339
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5345 }
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5348 if (rc) {
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5351 "rc = 0x%x\n", rc);
5352 goto out_destroy_mbx_cq;
5353 }
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5358
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5364 }
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5367 if (rc) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5370 "rc = 0x%x\n", rc);
5371 goto out_destroy_els_cq;
5372 }
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5377
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5385 }
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5389 if (rc) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5394 }
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5398 fcp_cqidx,
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5400 fcp_cqidx,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5402 }
5403
5404 /*
5405 * Set up all the Work Queues (WQs)
5406 */
5407
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5413 }
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5416 if (rc) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5419 "rc = 0x%x\n", rc);
5420 goto out_destroy_fcp_cq;
5421 }
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5426
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5432 }
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5435 if (rc) {
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5438 "rc = 0x%x\n", rc);
5439 goto out_destroy_mbx_wq;
5440 }
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5445
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
5453 }
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5456 LPFC_FCP);
5457 if (rc) {
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5462 }
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5466 fcp_wqidx,
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5468 fcp_cq_index,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5472 }
5473
5474 /*
5475 * Create Receive Queue (RQ)
5476 */
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516out_error:
5517 return rc;
5518}
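
The fast-path loop above binds each FCP work queue to a completion queue in
round-robin order, so cfg_fcp_wq_count work queues can share the smaller set
of cfg_fcp_eq_count CQ/EQ pairs. A minimal sketch of that mapping; the helper
name and the counts below are illustrative only, not driver code:

        /* Illustrative only: WQ index -> CQ index under the round-robin rotation */
        static int example_fcp_wq_to_cq(int wq_idx, int num_fcp_cqs)
        {
                return wq_idx % num_fcp_cqs;
        }

        /* e.g. 8 WQs over 4 CQs: WQ0->CQ0, WQ1->CQ1, WQ2->CQ2, WQ3->CQ3, WQ4->CQ0, ... */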
5519
5520/**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5524 * This routine is invoked to unset all the SLI4 queues for the FCoE HBA
5525 * operation.
5526 *
5527 * Return codes
5528 * 0 - successful
5529 * ENOMEM - No available memory
5530 * EIO - The mailbox failed to complete successfully.
5531 **/
5532void
5533lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534{
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560}
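
lpfc_sli4_queue_unset() is the hardware-facing half of teardown (mailbox
commands destroy the queues on the port), while lpfc_sli4_queue_destroy()
above frees the host-side queue structures. A hedged sketch of how the two
halves pair up; the wrapper name is hypothetical and the actual call sites
live elsewhere in this file:

        /* Sketch only: tear the queues down on the HBA first, then free host memory */
        static void example_sli4_queue_teardown(struct lpfc_hba *phba)
        {
                lpfc_sli4_queue_unset(phba);
                lpfc_sli4_queue_destroy(phba);
        }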
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568 * (CQE). For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575 * 0 - successful
5576 * -ENOMEM - No available memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603 * driver unload time. Note that it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
5620/**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624 * This routine is the lock free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630struct lpfc_cq_event *
5631__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632{
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638}
5639
5640/**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644 * This routine is the lock version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650struct lpfc_cq_event *
5651lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652{
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660}
5661
5662/**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667 * This routine is the lock free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
5669 **/
5670void
5671__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673{
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675}
5676
5677/**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682 * This routine is the lock version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685void
5686lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688{
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693}
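
The completion-queue event helpers follow the usual locked/lock-free split:
the __lpfc_sli4_* variants assume the caller already holds phba->hbalock,
and the plain variants take and drop it internally. A short sketch of the
calling convention; example_cq_event_usage() is illustrative only:

        static void example_cq_event_usage(struct lpfc_hba *phba)
        {
                struct lpfc_cq_event *cq_event;
                unsigned long iflags;

                /* Not holding hbalock: use the locking wrappers */
                cq_event = lpfc_sli4_cq_event_alloc(phba);
                if (cq_event)
                        lpfc_sli4_cq_event_release(phba, cq_event);

                /* Already inside an hbalock critical section: use the __ variants */
                spin_lock_irqsave(&phba->hbalock, iflags);
                cq_event = __lpfc_sli4_cq_event_alloc(phba);
                if (cq_event)
                        __lpfc_sli4_cq_event_release(phba, cq_event);
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }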
5694
5695/**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699 * This routine is invoked to release all the pending completion-queue events
5700 * back into the free pool for device reset.
5701 **/
5702static void
5703lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704{
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 /* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726}
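
The release-all routine uses a common drain idiom: splice every pending list
onto a local list while holding hbalock, then walk the local list and return
the events to the pool without the lock held. A sketch of that idiom for a
single pending list; example_drain_one_list() is illustrative only:

        static void example_drain_one_list(struct lpfc_hba *phba,
                                           struct list_head *pending)
        {
                LIST_HEAD(local);
                struct lpfc_cq_event *cqe;
                unsigned long iflags;

                spin_lock_irqsave(&phba->hbalock, iflags);
                list_splice_init(pending, &local);      /* steal entries under the lock */
                spin_unlock_irqrestore(&phba->hbalock, iflags);

                while (!list_empty(&local)) {           /* release without the lock held */
                        list_remove_head(&local, cqe, struct lpfc_cq_event, list);
                        lpfc_sli4_cq_event_release(phba, cqe);
                }
        }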
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732 * This routine is invoked to request a PCI function reset. It destroys
5733 * all resources assigned to the PCI function that originates this request.
5734 *
5735 * Return codes
5736 * 0 - successful
5737 * ENOMEM - No available memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
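
The same SLI4_CONFIG status check (mailbox return code plus the cfg_shdr
status and add_status words) recurs in several routines in this area,
including the NOP sender below. A hedged sketch of a helper that could
factor it out; example_sli4_cfg_status() is hypothetical, not an existing
lpfc function:

        /* Sketch only: 0 on success, -ENXIO if the mailbox or shdr reported failure */
        static int example_sli4_cfg_status(LPFC_MBOXQ_t *mboxq, int rc)
        {
                union lpfc_sli4_cfg_shdr *shdr = (union lpfc_sli4_cfg_shdr *)
                        &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
                uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
                uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

                return (rc || status || add_status) ? -ENXIO : 0;
        }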
5776
5777/**
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5781 *
5782 * This routine is invoked to send @cnt NOP mailbox commands and wait
5783 * for each command to complete.
5784 *
5785 * Return: the number of NOP mailbox commands completed.
5786 **/
5787static int
5788lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5789{
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5792 uint32_t mbox_tmo;
5793 uint32_t rc = 0;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5796
5797 if (cnt == 0) {
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5800 return cnt;
5801 }
5802
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5804 if (!mboxq) {
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5808 return 0;
5809 }
5810
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5816
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 else
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5824 break;
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5830 &shdr->response);
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
5837 break;
5838 }
5839 }
5840
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5843
5844 return cmdsent;
5845}
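
Because the routine reports how many NOPs actually completed, a caller can
use it as a quick sanity check of the mailbox path. A hedged usage sketch;
the wrapper, the count of 5, and the log text are illustrative only:

        static void example_check_mbox_path(struct lpfc_hba *phba)
        {
                if (lpfc_sli4_send_nop_mbox_cmds(phba, 5) != 5)
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "NOP mailbox exercise did not fully "
                                        "complete\n");
        }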
5846
5847/**
5848 * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
5849 * @phba: pointer to lpfc hba data structure.
5850 * @fcfi: fcf index.
5851 *
5852 * This routine is invoked to unregister an FCFI from the device.
5853 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 /* Mark the FCFI as no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
5890
5891/**
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5897 *
5898 * Return codes
5899 * 0 - successful
5900 * other values - error
5901 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 * number of bytes required by each mapping. They actually correspond to
5922 * the PCI BAR regions 1, 2, and 4 of the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
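
For reference, the three mappings established above are undone in reverse
order on the error path and again by lpfc_sli4_pci_mem_unset() below. As
used in this file, they pair up as follows:

        /* Sketch only: logical BAR -> mapped register region
         *   LPFC_SLI4_BAR0 -> sli4_hba.conf_regs_memmap_p   (PCI config space registers)
         *   LPFC_SLI4_BAR1 -> sli4_hba.ctrl_regs_memmap_p   (HBA control registers)
         *   LPFC_SLI4_BAR2 -> sli4_hba.drbl_regs_memmap_p   (doorbell registers)
         */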
5982
5983/**
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5989 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021 * with an interrupt handler, which is done in this function. Note that
2326 * later when device is unloading, the driver should always call free_irq() 6022 * later when device is unloading, the driver should always call free_irq()
2327 * on all MSI-X vectors it has done request_irq() on before calling 6023 * on all MSI-X vectors it has done request_irq() on before calling
2328 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6024 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2333 * other values - error 6029 * other values - error
2334 **/ 6030 **/
2335static int 6031static int
2336lpfc_enable_msix(struct lpfc_hba *phba) 6032lpfc_sli_enable_msix(struct lpfc_hba *phba)
2337{ 6033{
2338 int rc, i; 6034 int rc, i;
2339 LPFC_MBOXQ_t *pmb; 6035 LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350 "0420 PCI enable MSI-X failed (%d)\n", rc); 6046 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351 goto msi_fail_out; 6047 goto msi_fail_out;
2352 } else 6048 }
2353 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6049 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355 "0477 MSI-X entry[%d]: vector=x%x " 6051 "0477 MSI-X entry[%d]: vector=x%x "
2356 "message=%d\n", i, 6052 "message=%d\n", i,
2357 phba->msix_entries[i].vector, 6053 phba->msix_entries[i].vector,
2358 phba->msix_entries[i].entry); 6054 phba->msix_entries[i].entry);
2359 /* 6055 /*
2360 * Assign MSI-X vectors to interrupt handlers 6056 * Assign MSI-X vectors to interrupt handlers
2361 */ 6057 */
2362 6058
2363 /* vector-0 is associated to slow-path handler */ 6059 /* vector-0 is associated to slow-path handler */
2364 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 6060 rc = request_irq(phba->msix_entries[0].vector,
2365 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 6061 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6062 LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366 if (rc) { 6063 if (rc) {
2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368 "0421 MSI-X slow-path request_irq failed " 6065 "0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2371 } 6068 }
2372 6069
2373 /* vector-1 is associated to fast-path handler */ 6070 /* vector-1 is associated to fast-path handler */
2374 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 6071 rc = request_irq(phba->msix_entries[1].vector,
2375 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 6072 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6073 LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376 6074
2377 if (rc) { 6075 if (rc) {
2378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6076 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
2401 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402 "0351 Config MSI mailbox command failed, " 6100 "0351 Config MSI mailbox command failed, "
2403 "mbxCmd x%x, mbxStatus x%x\n", 6101 "mbxCmd x%x, mbxStatus x%x\n",
2404 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 6102 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
2405 goto mbx_fail_out; 6103 goto mbx_fail_out;
2406 } 6104 }
2407 6105
@@ -2428,14 +6126,14 @@ msi_fail_out:
2428} 6126}
2429 6127
2430/** 6128/**
2431 * lpfc_disable_msix - Disable MSI-X interrupt mode 6129 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
2432 * @phba: pointer to lpfc hba data structure. 6130 * @phba: pointer to lpfc hba data structure.
2433 * 6131 *
2434 * This routine is invoked to release the MSI-X vectors and then disable the 6132 * This routine is invoked to release the MSI-X vectors and then disable the
2435 * MSI-X interrupt mode. 6133 * MSI-X interrupt mode to device with SLI-3 interface spec.
2436 **/ 6134 **/
2437static void 6135static void
2438lpfc_disable_msix(struct lpfc_hba *phba) 6136lpfc_sli_disable_msix(struct lpfc_hba *phba)
2439{ 6137{
2440 int i; 6138 int i;
2441 6139
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
2444 free_irq(phba->msix_entries[i].vector, phba); 6142 free_irq(phba->msix_entries[i].vector, phba);
2445 /* Disable MSI-X */ 6143 /* Disable MSI-X */
2446 pci_disable_msix(phba->pcidev); 6144 pci_disable_msix(phba->pcidev);
6145
6146 return;
2447} 6147}
2448 6148
2449/** 6149/**
2450 * lpfc_enable_msi - Enable MSI interrupt mode 6150 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
2451 * @phba: pointer to lpfc hba data structure. 6151 * @phba: pointer to lpfc hba data structure.
2452 * 6152 *
2453 * This routine is invoked to enable the MSI interrupt mode. The kernel 6153 * This routine is invoked to enable the MSI interrupt mode to device with
2454 * function pci_enable_msi() is called to enable the MSI vector. The 6154 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
2455 * device driver is responsible for calling the request_irq() to register 6155 * enable the MSI vector. The device driver is responsible for calling the
2456 * MSI vector with a interrupt the handler, which is done in this function. 6156 * request_irq() to register MSI vector with a interrupt the handler, which
6157 * is done in this function.
2457 * 6158 *
2458 * Return codes 6159 * Return codes
2459 * 0 - successful 6160 * 0 - successful
2460 * other values - error 6161 * other values - error
2461 */ 6162 */
2462static int 6163static int
2463lpfc_enable_msi(struct lpfc_hba *phba) 6164lpfc_sli_enable_msi(struct lpfc_hba *phba)
2464{ 6165{
2465 int rc; 6166 int rc;
2466 6167
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2474 return rc; 6175 return rc;
2475 } 6176 }
2476 6177
2477 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6178 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
2478 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6179 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479 if (rc) { 6180 if (rc) {
2480 pci_disable_msi(phba->pcidev); 6181 pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
2485} 6186}
2486 6187
2487/** 6188/**
2488 * lpfc_disable_msi - Disable MSI interrupt mode 6189 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
2489 * @phba: pointer to lpfc hba data structure. 6190 * @phba: pointer to lpfc hba data structure.
2490 * 6191 *
2491 * This routine is invoked to disable the MSI interrupt mode. The driver 6192 * This routine is invoked to disable the MSI interrupt mode to device with
2492 * calls free_irq() on MSI vector it has done request_irq() on before 6193 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
2493 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 6194 * done request_irq() on before calling pci_disable_msi(). Failure to do so
2494 * a device will be left with MSI enabled and leaks its vector. 6195 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6196 * its vector.
2495 */ 6197 */
2496
2497static void 6198static void
2498lpfc_disable_msi(struct lpfc_hba *phba) 6199lpfc_sli_disable_msi(struct lpfc_hba *phba)
2499{ 6200{
2500 free_irq(phba->pcidev->irq, phba); 6201 free_irq(phba->pcidev->irq, phba);
2501 pci_disable_msi(phba->pcidev); 6202 pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
2503} 6204}
2504 6205
2505/** 6206/**
2506 * lpfc_log_intr_mode - Log the active interrupt mode 6207 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
2507 * @phba: pointer to lpfc hba data structure. 6208 * @phba: pointer to lpfc hba data structure.
2508 * @intr_mode: active interrupt mode adopted.
2509 * 6209 *
2510 * This routine it invoked to log the currently used active interrupt mode 6210 * This routine is invoked to enable device interrupt and associate driver's
2511 * to the device. 6211 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
2512 */ 6212 * spec. Depends on the interrupt mode configured to the driver, the driver
6213 * will try to fallback from the configured interrupt mode to an interrupt
6214 * mode which is supported by the platform, kernel, and device in the order
6215 * of:
6216 * MSI-X -> MSI -> IRQ.
6217 *
6218 * Return codes
6219 * 0 - successful
6220 * other values - error
6221 **/
6222static uint32_t
6223lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6224{
6225 uint32_t intr_mode = LPFC_INTR_ERROR;
6226 int retval;
6227
6228 if (cfg_mode == 2) {
6229 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6230 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6231 if (!retval) {
6232 /* Now, try to enable MSI-X interrupt mode */
6233 retval = lpfc_sli_enable_msix(phba);
6234 if (!retval) {
6235 /* Indicate initialization to MSI-X mode */
6236 phba->intr_type = MSIX;
6237 intr_mode = 2;
6238 }
6239 }
6240 }
6241
6242 /* Fallback to MSI if MSI-X initialization failed */
6243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6244 retval = lpfc_sli_enable_msi(phba);
6245 if (!retval) {
6246 /* Indicate initialization to MSI mode */
6247 phba->intr_type = MSI;
6248 intr_mode = 1;
6249 }
6250 }
6251
6252 /* Fallback to INTx if both MSI-X/MSI initialization failed */
6253 if (phba->intr_type == NONE) {
6254 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6256 if (!retval) {
6257 /* Indicate initialization to INTx mode */
6258 phba->intr_type = INTx;
6259 intr_mode = 0;
6260 }
6261 }
6262 return intr_mode;
6263}
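
The interrupt-mode fallback implemented above can be summarized as follows,
derived from the code and stated here as a comment for quick reference:

        /* Sketch only:
         *   cfg_mode == 2 : try MSI-X, then MSI, then INTx
         *   cfg_mode == 1 : try MSI, then INTx
         *   cfg_mode == 0 : INTx only
         * The returned intr_mode (2, 1 or 0) records which mode actually took
         * effect; LPFC_INTR_ERROR means none of them could be enabled.
         */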
6264
6265/**
6266 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6267 * @phba: pointer to lpfc hba data structure.
6268 *
6269 * This routine is invoked to disable device interrupt and disassociate the
6270 * driver's interrupt handler(s) from interrupt vector(s) to device with
6271 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6272 * release the interrupt vector(s) for the message signaled interrupt.
6273 **/
2513static void 6274static void
2514lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 6275lpfc_sli_disable_intr(struct lpfc_hba *phba)
2515{ 6276{
2516 switch (intr_mode) { 6277 /* Disable the currently initialized interrupt mode */
2517 case 0: 6278 if (phba->intr_type == MSIX)
2518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6279 lpfc_sli_disable_msix(phba);
2519 "0470 Enable INTx interrupt mode.\n"); 6280 else if (phba->intr_type == MSI)
2520 break; 6281 lpfc_sli_disable_msi(phba);
2521 case 1: 6282 else if (phba->intr_type == INTx)
6283 free_irq(phba->pcidev->irq, phba);
6284
6285 /* Reset interrupt management states */
6286 phba->intr_type = NONE;
6287 phba->sli.slistat.sli_intr = 0;
6288
6289 return;
6290}
6291
6292/**
6293 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine is invoked to enable the MSI-X interrupt vectors to device
6297 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6298 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6299 * enables either all or nothing, depending on the current availability of
6300 * PCI vector resources. The device driver is responsible for calling the
6301 * individual request_irq() to register each MSI-X vector with a interrupt
6302 * handler, which is done in this function. Note that later when device is
6303 * unloading, the driver should always call free_irq() on all MSI-X vectors
6304 * it has done request_irq() on before calling pci_disable_msix(). Failure
6305 * to do so results in a BUG_ON() and a device will be left with MSI-X
6306 * enabled and leaks its vectors.
6307 *
6308 * Return codes
6309 * 0 - successful
6310 * other values - error
6311 **/
6312static int
6313lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6314{
6315 int rc, index;
6316
6317 /* Set up MSI-X multi-message vectors */
6318 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6319 phba->sli4_hba.msix_entries[index].entry = index;
6320
6321 /* Configure MSI-X capability structure */
6322 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6323 phba->sli4_hba.cfg_eqn);
6324 if (rc) {
2522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523 "0481 Enabled MSI interrupt mode.\n"); 6326 "0484 PCI enable MSI-X failed (%d)\n", rc);
2524 break; 6327 goto msi_fail_out;
2525 case 2: 6328 }
6329 /* Log MSI-X vector assignment */
6330 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
2526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527 "0480 Enabled MSI-X interrupt mode.\n"); 6332 "0489 MSI-X entry[%d]: vector=x%x "
2528 break; 6333 "message=%d\n", index,
2529 default: 6334 phba->sli4_hba.msix_entries[index].vector,
2530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6335 phba->sli4_hba.msix_entries[index].entry);
2531 "0482 Illegal interrupt mode.\n"); 6336 /*
2532 break; 6337 * Assign MSI-X vectors to interrupt handlers
6338 */
6339
6340 /* The first vector must be associated with the slow-path handler for MQ */
6341 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6342 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6343 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "0485 MSI-X slow-path request_irq failed "
6347 "(%d)\n", rc);
6348 goto msi_fail_out;
2533 } 6349 }
2534 return; 6350
6351 /* The rest of the vector(s) are associated to fast-path handler(s) */
6352 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6353 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6354 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6355 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6356 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6357 LPFC_FP_DRIVER_HANDLER_NAME,
6358 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6359 if (rc) {
6360 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6361 "0486 MSI-X fast-path (%d) "
6362 "request_irq failed (%d)\n", index, rc);
6363 goto cfg_fail_out;
6364 }
6365 }
6366
6367 return rc;
6368
6369cfg_fail_out:
6370 /* free the irq already requested */
6371 for (--index; index >= 1; index--)
6372 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
6373 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6374
6375 /* free the irq already requested */
6376 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
6377
6378msi_fail_out:
6379 /* Unconfigure MSI-X capability structure */
6380 pci_disable_msix(phba->pcidev);
6381 return rc;
2535} 6382}
2536 6383
6384/**
6385 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
6386 * @phba: pointer to lpfc hba data structure.
6387 *
6388 * This routine is invoked to release the MSI-X vectors and then disable the
6389 * MSI-X interrupt mode to device with SLI-4 interface spec.
6390 **/
2537static void 6391static void
2538lpfc_stop_port(struct lpfc_hba *phba) 6392lpfc_sli4_disable_msix(struct lpfc_hba *phba)
2539{ 6393{
2540 /* Clear all interrupt enable conditions */ 6394 int index;
2541 writel(0, phba->HCregaddr);
2542 readl(phba->HCregaddr); /* flush */
2543 /* Clear all pending interrupts */
2544 writel(0xffffffff, phba->HAregaddr);
2545 readl(phba->HAregaddr); /* flush */
2546 6395
2547 /* Reset some HBA SLI setup states */ 6396 /* Free up MSI-X multi-message vectors */
2548 lpfc_stop_phba_timers(phba); 6397 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
2549 phba->pport->work_port_events = 0; 6398
6399 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
6400 free_irq(phba->sli4_hba.msix_entries[index].vector,
6401 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6402 /* Disable MSI-X */
6403 pci_disable_msix(phba->pcidev);
6404
6405 return;
6406}
6407
6408/**
6409 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
6410 * @phba: pointer to lpfc hba data structure.
6411 *
6412 * This routine is invoked to enable the MSI interrupt mode to device with
6413 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
6414 * to enable the MSI vector. The device driver is responsible for calling
6415 * the request_irq() to register the MSI vector with an interrupt handler,
6416 * which is done in this function.
6417 *
6418 * Return codes
6419 * 0 - successful
6420 * other values - error
6421 **/
6422static int
6423lpfc_sli4_enable_msi(struct lpfc_hba *phba)
6424{
6425 int rc, index;
6426
6427 rc = pci_enable_msi(phba->pcidev);
6428 if (!rc)
6429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6430 "0487 PCI enable MSI mode success.\n");
6431 else {
6432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6433 "0488 PCI enable MSI mode failed (%d)\n", rc);
6434 return rc;
6435 }
6436
6437 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
6438 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6439 if (rc) {
6440 pci_disable_msi(phba->pcidev);
6441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6442 "0490 MSI request_irq failed (%d)\n", rc);
6443 }
6444
6445 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
6446 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6447 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6448 }
6449
6450 return rc;
6451}
2550 6452
6453/**
6454 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
6455 * @phba: pointer to lpfc hba data structure.
6456 *
6457 * This routine is invoked to disable the MSI interrupt mode to device with
6458 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
6459 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6460 * results in a BUG_ON() and a device will be left with MSI enabled and leaks
6461 * its vector.
6462 **/
6463static void
6464lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6465{
6466 free_irq(phba->pcidev->irq, phba);
6467 pci_disable_msi(phba->pcidev);
2551 return; 6468 return;
2552} 6469}
2553 6470
2554/** 6471/**
2555 * lpfc_enable_intr - Enable device interrupt 6472 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
2556 * @phba: pointer to lpfc hba data structure. 6473 * @phba: pointer to lpfc hba data structure.
2557 * 6474 *
2558 * This routine is invoked to enable device interrupt and associate driver's 6475 * This routine is invoked to enable device interrupt and associate driver's
2559 * interrupt handler(s) to interrupt vector(s). Depends on the interrupt 6476 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
2560 * mode configured to the driver, the driver will try to fallback from the 6477 * interface spec. Depends on the interrupt mode configured to the driver,
2561 * configured interrupt mode to an interrupt mode which is supported by the 6478 * the driver will try to fallback from the configured interrupt mode to an
2562 * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. 6479 * interrupt mode which is supported by the platform, kernel, and device in
6480 * the order of:
6481 * MSI-X -> MSI -> IRQ.
2563 * 6482 *
2564 * Return codes 6483 * Return codes
2565 * 0 - successful 6484 * 0 - successful
2566 * other values - error 6485 * other values - error
2567 **/ 6486 **/
2568static uint32_t 6487static uint32_t
2569lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6488lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570{ 6489{
2571 uint32_t intr_mode = LPFC_INTR_ERROR; 6490 uint32_t intr_mode = LPFC_INTR_ERROR;
2572 int retval; 6491 int retval, index;
2573 6492
2574 if (cfg_mode == 2) { 6493 if (cfg_mode == 2) {
2575 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6494 /* Preparation before conf_msi mbox cmd */
2576 retval = lpfc_sli_config_port(phba, 3); 6495 retval = 0;
2577 if (!retval) { 6496 if (!retval) {
2578 /* Now, try to enable MSI-X interrupt mode */ 6497 /* Now, try to enable MSI-X interrupt mode */
2579 retval = lpfc_enable_msix(phba); 6498 retval = lpfc_sli4_enable_msix(phba);
2580 if (!retval) { 6499 if (!retval) {
2581 /* Indicate initialization to MSI-X mode */ 6500 /* Indicate initialization to MSI-X mode */
2582 phba->intr_type = MSIX; 6501 phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2587 6506
2588 /* Fallback to MSI if MSI-X initialization failed */ 6507 /* Fallback to MSI if MSI-X initialization failed */
2589 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6508 if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590 retval = lpfc_enable_msi(phba); 6509 retval = lpfc_sli4_enable_msi(phba);
2591 if (!retval) { 6510 if (!retval) {
2592 /* Indicate initialization to MSI mode */ 6511 /* Indicate initialization to MSI mode */
2593 phba->intr_type = MSI; 6512 phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2597 6516
2598 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 6517 /* Fallback to INTx if both MSI-X/MSI initialization failed */
2599 if (phba->intr_type == NONE) { 6518 if (phba->intr_type == NONE) {
2600 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 6519 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
2601 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6520 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602 if (!retval) { 6521 if (!retval) {
2603 /* Indicate initialization to INTx mode */ 6522 /* Indicate initialization to INTx mode */
2604 phba->intr_type = INTx; 6523 phba->intr_type = INTx;
2605 intr_mode = 0; 6524 intr_mode = 0;
6525 for (index = 0; index < phba->cfg_fcp_eq_count;
6526 index++) {
6527 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
6528 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
6529 }
2606 } 6530 }
2607 } 6531 }
2608 return intr_mode; 6532 return intr_mode;
2609} 6533}
2610 6534
2611/** 6535/**
2612 * lpfc_disable_intr - Disable device interrupt 6536 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
2613 * @phba: pointer to lpfc hba data structure. 6537 * @phba: pointer to lpfc hba data structure.
2614 * 6538 *
2615 * This routine is invoked to disable device interrupt and disassociate the 6539 * This routine is invoked to disable device interrupt and disassociate
2616 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 6540 * the driver's interrupt handler(s) from interrupt vector(s) to device
2617 * interrupt mode, the driver will release the interrupt vector(s) for the 6541 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
2618 * message signaled interrupt. 6542 * will release the interrupt vector(s) for the message signaled interrupt.
2619 **/ 6543 **/
2620static void 6544static void
2621lpfc_disable_intr(struct lpfc_hba *phba) 6545lpfc_sli4_disable_intr(struct lpfc_hba *phba)
2622{ 6546{
2623 /* Disable the currently initialized interrupt mode */ 6547 /* Disable the currently initialized interrupt mode */
2624 if (phba->intr_type == MSIX) 6548 if (phba->intr_type == MSIX)
2625 lpfc_disable_msix(phba); 6549 lpfc_sli4_disable_msix(phba);
2626 else if (phba->intr_type == MSI) 6550 else if (phba->intr_type == MSI)
2627 lpfc_disable_msi(phba); 6551 lpfc_sli4_disable_msi(phba);
2628 else if (phba->intr_type == INTx) 6552 else if (phba->intr_type == INTx)
2629 free_irq(phba->pcidev->irq, phba); 6553 free_irq(phba->pcidev->irq, phba);
2630 6554
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6560}
2637 6561
2638/** 6562/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6564 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6565 *
2651 * Return code 6566 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6567 * a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6568 **/
2655static int __devinit 6569static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6570lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6571{
2658 struct lpfc_vport *vport = NULL; 6572 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676 6574
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 6575 spin_lock_irq(shost->host_lock);
2678 if (!phba) 6576 vport->load_flag |= FC_UNLOADING;
2679 goto out_release_regions; 6577 spin_unlock_irq(shost->host_lock);
2680 6578
2681 atomic_set(&phba->fast_event_count, 0); 6579 lpfc_stop_hba_timers(phba);
2682 spin_lock_init(&phba->hbalock);
2683 6580
2684 /* Initialize ndlp management spinlock */ 6581 phba->pport->work_port_events = 0;
2685 spin_lock_init(&phba->ndlp_lock);
2686 6582
2687 phba->pcidev = pdev; 6583 lpfc_sli_hba_down(phba);
2688 6584
2689 /* Assign an unused board number */ 6585 lpfc_sli_brdrestart(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6586
2693 INIT_LIST_HEAD(&phba->port_list); 6587 lpfc_sli_disable_intr(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6588
2702 /* Initialize timers used by driver */ 6589 return;
2703 init_timer(&phba->hb_tmofunc); 6590}
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6591
2707 psli = &phba->sli; 6592/**
2708 init_timer(&psli->mbox_tmo); 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2709 psli->mbox_tmo.function = lpfc_mbox_timeout; 6594 * @phba: pointer to lpfc hba data structure.
2710 psli->mbox_tmo.data = (unsigned long) phba; 6595 *
2711 init_timer(&phba->fcp_poll_timer); 6596 * This routine is invoked to unset the HBA device initialization steps to
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout; 6597 * a device with SLI-4 interface spec.
2713 phba->fcp_poll_timer.data = (unsigned long) phba; 6598 **/
2714 init_timer(&phba->fabric_block_timer); 6599static void
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
2716 phba->fabric_block_timer.data = (unsigned long) phba; 6601{
2717 init_timer(&phba->eratt_poll); 6602 struct lpfc_vport *vport = phba->pport;
2718 phba->eratt_poll.function = lpfc_poll_eratt; 6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6604
2721 pci_set_master(pdev); 6605 spin_lock_irq(shost->host_lock);
2722 pci_save_state(pdev); 6606 vport->load_flag |= FC_UNLOADING;
2723 pci_try_set_mwi(pdev); 6607 spin_unlock_irq(shost->host_lock);
2724 6608
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6609 phba->pport->work_port_events = 0;
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727 goto out_idr_remove;
2728 6610
2729 /* 6611 lpfc_sli4_hba_down(phba);
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6612
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6613 lpfc_sli4_disable_intr(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6614
2739 /* Map HBA SLIM to a kernel virtual address. */ 6615 return;
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6616}
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747
2748 /* Map HBA Control Registers to a kernel virtual address. */
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6617
2757 /* Allocate memory for SLI-2 structures */ 6618/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6620 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6621 *
2761 GFP_KERNEL); 6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6623 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
2764 6633
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6634 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6635 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6636
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6637 /*
2772 lpfc_sli_hbq_size(), 6638 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6639 * mailbox command.
2774 GFP_KERNEL); 6640 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6641
2778 hbq_count = lpfc_sli_hbq_count(); 6642 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6643 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6645 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6646 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
2785 } 6651 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6652 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6654 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6655 mboxq = phba->sli.mbox_active;
2790 6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6657 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6659 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6660 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6661 }
2802 6662
2803 /* Initialize and populate the iocb list per host. */ 6663 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6664 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6665
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6666 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6667 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6668
2825 spin_lock_irq(&phba->hbalock); 6669 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6670 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6671
2831 /* Initialize HBA structure */ 6672 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6673 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6674}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6675
2837 INIT_LIST_HEAD(&phba->work_list); 6676/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 6684 * information of the device and driver to see if the driver can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
2840 6700
2841 /* Initialize the wait queue head for the kernel thread */ 6701 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
2843 6705
2844 /* Startup the kernel thread for this host adapter. */ 6706 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6707 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6708 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6710 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6711 goto out_free_phba;
2850 } 6712 }
2851 6713
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6716 if (error)
6717 goto out_disable_pci_dev;
2855 6718
2856 /* Initialize list of fabric iocbs */ 6719 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
2858 6726
2859 /* Initialize list to save ELS buffers */ 6727 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
2861 6734
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6736 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
2865 6742
2866 shost = lpfc_shost_from_vport(vport); 6743 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
2869 6750
2870 pci_set_drvdata(pdev, shost); 6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
2871 6758
2872 phba->MBslimaddr = phba->slim_memmap_p; 6759 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6760 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6761 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
2877 6766
2878 /* Configure sysfs attributes */ 6767 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6772 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6773 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6774 }
2885 6775
6776 /* Now, trying to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6777 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6781 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6783 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6787 goto out_free_sysfs_attr;
2894 } 6788 }
2895 /* HBA SLI setup */ 6789 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6790 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6792 "1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6796
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6797 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6798 msleep(50);
2905 /* Check active interrupts received */ 6799 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6802 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6803 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6804 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6805 break;
2911 } else { 6806 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6808 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6809 "failed active interrupt test.\n",
2915 intr_mode); 6810 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6811 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6812 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6813 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6814 cfg_mode = --intr_mode;
2929 } 6815 }
2930 } 6816 }
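The retry loop above brings the HBA up at progressively simpler interrupt modes: it starts from the configured cfg_use_msi value and, whenever the active-interrupt test fails, disables the current mode and decrements to the next one, accepting INTx unconditionally. Below is a minimal userspace sketch of that downgrade pattern; the stub functions stand in for the lpfc calls, and the mode values 2/1/0 for MSI-X/MSI/INTx are an assumption carried over from the driver's cfg_use_msi convention.

/*
 * Illustrative sketch only -- not lpfc code.  Mimics the interrupt-mode
 * fallback above: try the configured mode, step down one level whenever
 * the active-interrupt test fails, and accept INTx (mode 0) as the floor.
 */
#include <stdio.h>

enum intr_mode { INTX = 0, MSI = 1, MSIX = 2, INTR_ERROR = -1 };

/* Stand-ins for lpfc_sli_enable_intr() and the sli_intr counter check. */
static int enable_intr(int mode) { return mode; }              /* pretend enable succeeds */
static int active_intr_test(int mode) { return mode != MSIX; } /* pretend MSI-X fails the test */

static int bring_up(int cfg_mode)
{
	int intr_mode;

	for (;;) {
		intr_mode = enable_intr(cfg_mode);
		if (intr_mode == INTR_ERROR)
			return -1;
		if (intr_mode == INTX || active_intr_test(intr_mode))
			return intr_mode;	/* this mode works, keep it */
		/* Disable the current mode and retry with the next lower one. */
		cfg_mode = --intr_mode;
	}
}

int main(void)
{
	printf("settled on interrupt mode %d\n", bring_up(MSIX));
	return 0;
}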
2931 6817
2932 /* 6818 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6819 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6820
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6821 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6822 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6823
3025 return 0; 6824 return 0;
3026 6825
3027out_remove_device: 6826out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6827 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6828out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6829 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6830out_destroy_shost:
3039 destroy_port(vport); 6831 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6832out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6833 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6834out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6835 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6836out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6837 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6838out_unset_pci_mem_s3:
3047 } 6839 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6840out_disable_pci_dev:
3049out_free_hbqslimp: 6841 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6842out_free_phba:
3062 kfree(phba); 6843 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6844 return error;
3072} 6845}
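Both the old monolithic probe and the new lpfc_pci_probe_one_s3() unwind failures through a ladder of goto labels, each label releasing only what was successfully set up before the failure, in reverse order of acquisition. The following is a tiny illustrative sketch of the same pattern with hypothetical resources, not the lpfc ones.

/* Two-step setup with goto unwinding, mirroring the probe error paths. */
#include <stdlib.h>

static int probe_sketch(void **a, void **b)
{
	int error = 0;

	*a = malloc(32);
	if (!*a) {
		error = -1;
		goto out;
	}
	*b = malloc(64);
	if (!*b) {
		error = -1;
		goto out_free_a;	/* undo only the first step */
	}
	return 0;			/* success: caller owns both */

out_free_a:
	free(*a);
out:
	return error;
}

int main(void)
{
	void *a = NULL, *b = NULL;

	if (probe_sketch(&a, &b) == 0) {
		free(b);
		free(a);
	}
	return 0;
}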
3073 6846
3074/** 6847/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6849 * @pdev: pointer to PCI device
3077 * 6850 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6851 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
3081 **/ 6855 **/
3082static void __devexit 6856static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6858{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6872 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6873 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6874 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6876 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6877 lpfc_destroy_vport_work_array(phba, vports);
3104 6878
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6894 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6895 lpfc_sli_brdrestart(phba);
3122 6896
3123 lpfc_stop_phba_timers(phba); 6897 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6898 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6899 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6900 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6902 lpfc_debugfs_terminate(vport);
3129 6903
3130 /* Disable interrupt */ 6904 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6905 lpfc_sli_disable_intr(phba);
3132 6906
3133 pci_set_drvdata(pdev, NULL); 6907 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6908 scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6912 * corresponding pools here.
3139 */ 6913 */
3140 lpfc_scsi_free(phba); 6914 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6915 lpfc_mem_free_all(phba);
3142 6916
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6925 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6926 iounmap(phba->slim_memmap_p);
3153 6927
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6928 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6929
3158 pci_release_selected_regions(pdev, bars); 6930 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6931 pci_disable_device(pdev);
3160} 6932}
3161 6933
3162/** 6934/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6936 * @pdev: pointer to PCI device
3165 * @msg: power management message 6937 * @msg: power management message
3166 * 6938 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6939 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6940 * system Power Management (PM) to device with SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6941 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bring the device offline. Note that as the 6942 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6943 * and bring the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6944 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6946 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6947 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
3177 * 6950 *
3178 * Return code 6951 * Return code
3179 * 0 - driver suspended the device 6952 * 0 - driver suspended the device
3180 * Error otherwise 6953 * Error otherwise
3181 **/ 6954 **/
3182static int 6955static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6957{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6967 kthread_stop(phba->worker_thread);
3195 6968
3196 /* Disable interrupt from device */ 6969 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6970 lpfc_sli_disable_intr(phba);
3198 6971
3199 /* Save device state to PCI config space */ 6972 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6973 pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6977}
3205 6978
3206/** 6979/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6981 * @pdev: pointer to PCI device
3209 * 6982 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6983 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6984 * system Power Management (PM) to device with SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6985 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6986 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6987 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6990 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
3219 * 6993 *
3220 * Return code 6994 * Return code
3221 * 0 - driver suspended the device 6995 * 0 - driver suspended the device
3222 * Error otherwise 6996 * Error otherwise
3223 **/ 6997 **/
3224static int 6998static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 7000{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7024 }
3251 7025
3252 /* Configure and enable interrupt */ 7026 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7028 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7030 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7043}
3270 7044
3271/** 7045/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7047 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7048 * @state: the current PCI connection state.
3275 * 7049 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7050 * This routine is called from the PCI subsystem for I/O error handling to
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7051 * device with SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7052 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7053 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7054 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
3282 * 7057 *
3283 * Return codes 7058 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7061 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7062static pci_ers_result_t
3288 pci_channel_state_t state) 7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7064{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7087 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7088
3314 /* Disable interrupt */ 7089 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7090 lpfc_sli_disable_intr(phba);
3316 7091
3317 /* Request a slot reset. */ 7092 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7093 return PCI_ERS_RESULT_NEED_RESET;
3319} 7094}
3320 7095
3321/** 7096/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7098 * @pdev: pointer to PCI device.
3324 * 7099 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7100 * This routine is called from the PCI subsystem for error handling to
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7101 * device with SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7103 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7105 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7106 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
3333 * 7109 *
3334 * Return codes 7110 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7113 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7116{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7131 pci_set_master(pdev);
3355 7132
3356 spin_lock_irq(&phba->hbalock); 7133 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7135 spin_unlock_irq(&phba->hbalock);
3359 7136
3360 /* Configure and enable interrupt */ 7137 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7139 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7141 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7155}
3379 7156
3380/** 7157/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7159 * @pdev: pointer to PCI device
3383 * 7160 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7161 * This routine is called from the PCI subsystem for error handling to device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7162 * with SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
3388 */ 7166 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7172
7173 lpfc_online(phba);
7174}
7175
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
7197}
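The tiers above map the adapter's configured XRI limit (sli4_hba.max_cfg_param.max_xri) to the number of IOCBs reserved for ELS/CT traffic. A standalone copy of the same thresholds with a few spot checks, for illustration only:

#include <assert.h>

/* Same tiering as lpfc_sli4_get_els_iocb_cnt() above. */
static int els_iocb_cnt(int max_xri)
{
	if (max_xri <= 100)
		return 4;
	if (max_xri <= 256)
		return 8;
	if (max_xri <= 512)
		return 16;
	if (max_xri <= 1024)
		return 32;
	return 48;
}

int main(void)
{
	assert(els_iocb_cnt(64)   == 4);
	assert(els_iocb_cnt(300)  == 16);
	assert(els_iocb_cnt(2048) == 48);
	return 0;
}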
7198
7199/**
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
 7204 * This routine is called from the kernel's PCI subsystem to attach a device with
7205 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 7207 * information of the device and driver to see if the driver
7208 * can support this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
7224 int mcnt;
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7236 goto out_free_phba;
7237 }
7238
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7241 if (error)
7242 goto out_disable_pci_dev;
7243
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7258 }
7259
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7291 }
7292
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7300 }
7301
7302 /* Now, trying to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7312 error = -ENODEV;
7313 goto out_free_sysfs_attr;
7314 }
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7319 error = -ENODEV;
7320 goto out_disable_intr;
7321 }
7322
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
7335 }
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
 7340		/* Unset the previous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7344 }
7345
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7348
7349 return 0;
7350
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7367out_free_phba:
7368 lpfc_hba_free(phba);
7369 return error;
7370}
7371
7372/**
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7375 *
 7376 * This routine is called from the kernel's PCI subsystem to detach a device with
7377 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
7380 **/
7381static void __devexit
7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7383{
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7388 int i;
7389
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7394
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7397
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7408
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7411
7412 /*
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7416 */
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7419
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7426 */
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7429
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7432
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7436
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7439
7440 return;
7441}
7442
7443/**
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
7448 * This routine is called from the kernel's PCI subsystem to support system
7449 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bring
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
7458 * according to the @msg provided by the PM.
7459 *
7460 * Return code
7461 * 0 - driver suspended the device
7462 * Error otherwise
7463 **/
7464static int
7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7466{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7491 *
7492 * This routine is called from the kernel's PCI subsystem to support system
 7493 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7501 * state.
7502 *
7503 * Return code
 7504 * 	0 - driver resumed the device
7505 * Error otherwise
7506 **/
7507static int
7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7512 uint32_t intr_mode;
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7546 lpfc_online(phba);
7547
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
7551 return 0;
7552}
7553
7554/**
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7558 *
7559 * This routine is called from the PCI subsystem for error handling to device
7560 * with SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7565 *
7566 * Return codes
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7569 **/
7570static pci_ers_result_t
7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7572{
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
7577 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7579 *
7580 * This routine is called from the PCI subsystem for error handling to device
7581 * with SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7589 *
7590 * Return codes
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7593 */
7594static pci_ers_result_t
7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7596{
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7602 * @pdev: pointer to PCI device
7603 *
7604 * This routine is called from the PCI subsystem for error handling to device
7605 * with SLI-4 interface spec. It is called when kernel error recovery tells
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
7609 **/
7610static void
7611lpfc_io_resume_s4(struct pci_dev *pdev)
7612{
7613 return;
7614}
7615
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
7623 * at PCI device-specific information of the device and driver to see if the
 7624 * driver can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
 7734 * 	0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
7799 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7946 { 0 }
3473}; 7947};
3474 7948
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7960 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7961 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7962 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7963 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7964 .err_handler = &lpfc_err_handler,
3491}; 7965};
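The .err_handler field points at lpfc_err_handler, which is defined outside this hunk; presumably it ties the three dispatchers above into the standard struct pci_error_handlers table roughly as follows. This is a sketch under that assumption, using the 2.6-era field names; the real table may also wire mmio_enabled or link_reset callbacks.

#include <linux/pci.h>

/* Hypothetical wiring -- the real lpfc_err_handler lives elsewhere in lpfc_init.c. */
static struct pci_error_handlers lpfc_err_handler_sketch = {
	.error_detected	= lpfc_io_error_detected,
	.slot_reset	= lpfc_io_slot_reset,
	.resume		= lpfc_io_resume,
};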
3492 7966
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b01..954ba57970a 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
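The widened 32-bit masks above feed these two macros: a message is printed when any of its mask bits is enabled in the (v)port's cfg_log_verbose, or unconditionally at KERN_ERR severity and above. A hedged usage sketch; the message number 9999 is a placeholder rather than a real lpfc message ID:

/* Illustrative only: log an initialization/SLI event against the HBA.
 * Emitted if LOG_INIT or LOG_SLI is enabled in cfg_log_verbose, or
 * always for KERN_ERR and more severe levels. */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_SLI,
		"9999 example: sli_rev 0x%x link_state 0x%x\n",
		phba->sli_rev, phba->link_state);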
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc212..b9b451c0901 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping the list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
80
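A hedged caller sketch for the routine above (not part of this patch): grab a mailbox element from the driver's mailbox mempool, prepare the static-vport dump starting at offset 0, and post it through the driver's existing mailbox issue path. The wrapper name is hypothetical, and lpfc_sli_issue_mbox(), MBX_NOWAIT and MBX_NOT_FINISHED refer to lpfc facilities outside this hunk.

/* Hypothetical caller: prepare and post the static-vport dump.  Error
 * handling is reduced to freeing the mailbox if the post fails. */
static int lpfc_issue_static_vport_dump(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;
	lpfc_dump_static_vport(phba, pmb, 0);	/* offset 0: first chunk */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}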
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine puts the completed mailbox command into the mailbox command 1378 * This routine puts the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
1330/** 1395/**
1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine is to check whether a mailbox command is valid to be issued.
1401 * This check is performed by the mailbox issue API before a client's
1402 * mailbox command is handed to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
1409 /* Mailbox command that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine is to check whether the HBA device is ready for posting a
1430 * mailbox command. It is used by the mailbox transport API at the time it
1431 * is about to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
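Taken together, the two helpers above act as gates in front of the mailbox transport: lpfc_mbox_cmd_check() rejects a command whose completion handler has no owning vport, and lpfc_mbox_dev_check() rejects posting while the PCI channel is offline or the HBA is in the error state. A sketch of how an issue path might combine them; the wrapper name is hypothetical, and lpfc_sli_issue_mbox()/MBX_NOWAIT are existing facilities outside this hunk:

/* Hypothetical wrapper: run both gate checks before handing the
 * command to the mailbox transport. */
static int lpfc_mbox_post_checked(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	if (lpfc_mbox_dev_check(phba))		/* device ready? */
		return -ENODEV;
	if (lpfc_mbox_cmd_check(phba, mboxq))	/* command well formed? */
		return -ENODEV;
	return lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
}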
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
1527 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
1573 * This routine sets up the header fields of SLI4 specific mailbox command
1574 * for sending IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
1613 /* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated in the length of a
1625 * page even though the last SGE might not fill up to a
1626 * page, this is used as a priori size of PAGE_SIZE for
1627 * the later DMA memory free.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
1631 /* If the DMA allocation fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0)
1637 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1638 resid_len = length - alloc_len;
1639 if (resid_len > PAGE_SIZE) {
1640 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1641 PAGE_SIZE);
1642 alloc_len += PAGE_SIZE;
1643 } else {
1644 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1645 resid_len);
1646 alloc_len = length;
1647 }
1648 }
1649
1650 /* Set up main header fields in mailbox command */
1651 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1652 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1653
1654 /* Set up sub-header fields into the first page */
1655 if (pagen > 0) {
1656 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1657 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1658 cfg_shdr->request.request_length =
1659 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1660 }
1661 /* The sub-header is in DMA memory, which needs endian conversion */
1662 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1663 sizeof(union lpfc_sli4_cfg_shdr));
1664
1665 return alloc_len;
1666}
1667
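A hedged sketch of a non-embedded SLI4_CONFIG round trip built on the routine above: allocate a mailbox, let lpfc_sli4_config() back the payload with SGE pages, issue it, and release everything with lpfc_sli4_mbox_cmd_free(). The subsystem and opcode values are left as parameters because the real constants live in lpfc_hw4.h; the wrapper name, MBX_POLL and lpfc_sli_issue_mbox() are assumptions about facilities outside this hunk.

/* Hypothetical non-embedded SLI4_CONFIG usage. */
static int lpfc_sli4_config_example(struct lpfc_hba *phba, uint8_t subsys,
				    uint8_t opcode, uint32_t payload_len)
{
	struct lpfcMboxq *mbox;
	uint32_t alloc_len;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* emb == false: payload is carried in external DMA pages via SGEs */
	alloc_len = lpfc_sli4_config(phba, mbox, subsys, opcode,
				     payload_len, false);
	if (alloc_len < payload_len) {
		/* could not back the whole payload with SGE pages */
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}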
1668/**
1669 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1670 * @phba: pointer to lpfc hba data structure.
1671 * @mbox: pointer to lpfc mbox command.
1672 *
1673 * This routine gets the opcode from a SLI4 specific mailbox command for
1674 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1675 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1676 * returned.
1677 **/
1678uint8_t
1679lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1680{
1681 struct lpfc_mbx_sli4_config *sli4_cfg;
1682 union lpfc_sli4_cfg_shdr *cfg_shdr;
1683
1684 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1685 return 0;
1686 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1687
1688 /* For embedded mbox command, get opcode from embedded sub-header*/
1689 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1690 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1691 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1692 }
1693
1694 /* For non-embedded mbox command, get opcode from first dma page */
1695 if (unlikely(!mbox->sge_array))
1696 return 0;
1697 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1698 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1699}
1700
1701/**
1702 * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
1703 * @mboxq: pointer to lpfc mbox command.
1704 *
1705 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1706 * mailbox command.
1707 **/
1708void
1709lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1710{
1711 /* Set up SLI4 mailbox command header fields */
1712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1713 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1714
1715 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717
1718 /* Virtual fabrics and FIPs are not supported yet. */
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1720
1721 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg)
1723 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1724
1725 /* Enable NPIV only if configured to do so. */
1726 if (phba->max_vpi && phba->cfg_enable_npiv)
1727 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1728
1729 return;
1730}
1731
1732/**
1733 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1734 * @mbox: pointer to lpfc mbox command to initialize.
1735 * @vport: Vport associated with the VF.
1736 *
1737 * This routine initializes @mbox to all zeros and then fills in the mailbox
1738 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1739 * in the context of an FCF. The driver issues this command to setup a VFI
1740 * before issuing a FLOGI to login to the VSAN. The driver should also issue a
1741 * REG_VFI after a successful VSAN login.
1742 **/
1743void
1744lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1745{
1746 struct lpfc_mbx_init_vfi *init_vfi;
1747
1748 memset(mbox, 0, sizeof(*mbox));
1749 init_vfi = &mbox->u.mqe.un.init_vfi;
1750 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1751 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1752 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1753 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1754 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1755}
1756
1757/**
1758 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1759 * @mbox: pointer to lpfc mbox command to initialize.
1760 * @vport: vport associated with the VF.
1761 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1762 *
1763 * This routine initializes @mbox to all zeros and then fills in the mailbox
1764 * fields from @vport, and uses the DMAable buffer at @phys to send the vport's
1765 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1766 * fabrics identified by VFI in the context of an FCF.
1767 **/
1768void
1769lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1770{
1771 struct lpfc_mbx_reg_vfi *reg_vfi;
1772
1773 memset(mbox, 0, sizeof(*mbox));
1774 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1775 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1776 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1777 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1778 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1779 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1780 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1781 reg_vfi->bde.addrLow = putPaddrLow(phys);
1782 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1783 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1784 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1785}
1786
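As the comment above notes, REG_VFI follows a successful fabric login. A hedged sketch of the preparation step only: the vport's service parameters are copied into a DMA-able buffer and the mailbox BDE is pointed at its bus address. Buffer allocation and the actual mailbox issue are elided, and the helper name is hypothetical.

/* Hypothetical REG_VFI preparation after FLOGI/VSAN login succeeds. */
static void lpfc_prep_reg_vfi(struct lpfc_vport *vport,
			      struct lpfcMboxq *mbox,
			      struct lpfc_dmabuf *sparam_buf)
{
	/* The HBA reads the service parameters through the BDE set up by
	 * lpfc_reg_vfi(), so they must sit in DMA-able memory first. */
	memcpy(sparam_buf->virt, &vport->fc_sparam,
	       sizeof(vport->fc_sparam));
	lpfc_reg_vfi(mbox, vport, sparam_buf->phys);
}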
1787/**
1788 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1789 * @mbox: pointer to lpfc mbox command to initialize.
1790 * @vpi: VPI to be initialized.
1791 *
1792 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1793 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1794 * with the virtual N Port. The SLI Host issues this command before issuing a
1795 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1796 * successful virtual NPort login.
1797 **/
1798void
1799lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1800{
1801 memset(mbox, 0, sizeof(*mbox));
1802 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1803 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1804}
1805
1806/**
1807 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1808 * @mbox: pointer to lpfc mbox command to initialize.
1809 * @vfi: VFI to be unregistered.
1810 *
1811 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1812 * (logical NPort) into the inactive state. The SLI Host must have logged out
1813 * and unregistered all remote N_Ports to abort any activity on the virtual
1814 * fabric. The SLI Port posts the mailbox response after marking the virtual
1815 * fabric inactive.
1816 **/
1817void
1818lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1819{
1820 memset(mbox, 0, sizeof(*mbox));
1821 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1822 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1823}
1824
1825/**
1826 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
1827 * @phba: pointer to the hba structure.
1828 * @mbox: pointer to lpfc mbox command to initialize.
1829 *
1830 * This function creates a SLI4 dump mailbox command to dump FCoE
1831 * parameters stored in region 23.
1832 **/
1833int
1834lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1835 struct lpfcMboxq *mbox)
1836{
1837 struct lpfc_dmabuf *mp = NULL;
1838 MAILBOX_t *mb;
1839
1840 memset(mbox, 0, sizeof(*mbox));
1841 mb = &mbox->u.mb;
1842
1843 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1844 if (mp)
1845 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1846
1847 if (!mp || !mp->virt) {
1848 kfree(mp);
1849 /* dump_fcoe_param failed to allocate memory */
1850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1851 "2569 lpfc_dump_fcoe_param: memory"
1852 " allocation failed \n");
1853 return 1;
1854 }
1855
1856 memset(mp->virt, 0, LPFC_BPL_SIZE);
1857 INIT_LIST_HEAD(&mp->list);
1858
1859 /* save address for completion */
1860 mbox->context1 = (uint8_t *) mp;
1861
1862 mb->mbxCommand = MBX_DUMP_MEMORY;
1863 mb->un.varDmp.type = DMP_NV_PARAMS;
1864 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1865 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1866 mb->un.varWords[3] = putPaddrLow(mp->phys);
1867 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1868 return 0;
1869}
1870
1871/**
1872 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1873 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1874 * @mbox: pointer to lpfc mbox command to initialize.
1875 *
1876 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1877 * SLI Host uses the command to activate an FCF after it has acquired FCF
1878 * information via a READ_FCF mailbox command. This mailbox command is also used
1879 * to indicate where received unsolicited frames from this FCF will be sent. By
1880 * default this routine will set up the FCF to forward all unsolicited frames
1881 * to the RQ ID passed in the @phba. This can be overridden by the caller for
1882 * more complicated setups.
1883 **/
1884void
1885lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1886{
1887 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1888
1889 memset(mbox, 0, sizeof(*mbox));
1890 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1891 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1892 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1893 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1894 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1895 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1896 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1897 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1898 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1899 (~phba->fcf.addr_mode) & 0x3);
1900 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1901 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1902 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1903 }
1904}
1905
1906/**
1907 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1908 * @mbox: pointer to lpfc mbox command to initialize.
1909 * @fcfi: FCFI to be unregistered.
1910 *
1911 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1912 * The SLI Host uses the command to inactivate an FCFI.
1913 **/
1914void
1915lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1916{
1917 memset(mbox, 0, sizeof(*mbox));
1918 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1919 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1920}
1921
1922/**
1923 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1924 * @mbox: pointer to lpfc mbox command to initialize.
1925 * @ndlp: The nodelist structure that describes the RPI to resume.
1926 *
1927 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1928 * link event.
1929 **/
1930void
1931lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1932{
1933 struct lpfc_mbx_resume_rpi *resume_rpi;
1934
1935 memset(mbox, 0, sizeof(*mbox));
1936 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1937 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1938 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1939 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1940 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1941 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1942 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1943}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a97673339..e198c917c13 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Frees all memory from the PCI and driver memory pools used by
213 * the HBA: lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool, the
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist, and the
215 * VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along with a non-DMA-mapped container for it. 366 * pool along with a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
 418 * pool along with a non-DMA-mapped container for it.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
456 * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
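The lpfc_sli4_rb_alloc/lpfc_sli4_rb_free pair above follows a common two-stage pattern: a receive buffer is backed by two separate DMA pools (header and data), and a failure on the second allocation has to unwind the first so nothing leaks. Below is a minimal user-space sketch of that rollback shape, assuming made-up rb_buf/rb_alloc names and plain malloc in place of the driver's pci_pool API; it only illustrates the control flow, not the real lpfc structures.

/* Illustrative sketch only: two-stage allocation with rollback on partial
 * failure, mirroring the hrb/drb pool usage above. Names are hypothetical. */
#include <stdlib.h>
#include <stdio.h>

struct rb_buf {
	void *hbuf;		/* stands in for the header (hrb) allocation */
	void *dbuf;		/* stands in for the data (drb) allocation */
	size_t size;
};

static struct rb_buf *rb_alloc(size_t hlen, size_t dlen)
{
	struct rb_buf *rb = malloc(sizeof(*rb));

	if (!rb)
		return NULL;
	rb->hbuf = malloc(hlen);
	if (!rb->hbuf) {
		free(rb);
		return NULL;
	}
	rb->dbuf = malloc(dlen);
	if (!rb->dbuf) {
		/* roll back the first allocation before failing */
		free(rb->hbuf);
		free(rb);
		return NULL;
	}
	rb->size = dlen;
	return rb;
}

static void rb_free(struct rb_buf *rb)
{
	free(rb->dbuf);
	free(rb->hbuf);
	free(rb);
}

int main(void)
{
	struct rb_buf *rb = rb_alloc(128, 4096);

	if (rb) {
		printf("allocated rb, data size %zu\n", rb->size);
		rb_free(rb);
	}
	return 0;
}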
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41..09f659f77bb 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..7991ba1980a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 329
326 vports = lpfc_create_vport_work_array(phba); 330 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 331 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 332 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 333 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 334 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 335 new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 383
380 vports = lpfc_create_vport_work_array(phba); 384 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 385 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 387 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 388 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 389 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 431
428 vports = lpfc_create_vport_work_array(phba); 432 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 433 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 434 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 435 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 436 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 437 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 442}
439 443
440/** 444/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 445 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call being executed. 446 * @vport: The virtual port for which this call being executed.
447 * @num_to_allocate: The requested number of buffers to allocate.
443 * 448 *
444 * This routine allocates a scsi buffer, which contains all the necessary 449 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 450 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 451 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 452 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 453 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 454 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 455 *
451 * Return codes: 456 * Return codes:
452 * NULL - Error 457 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 458 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 459 **/
455static struct lpfc_scsi_buf * 460static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 461lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 462{
458 struct lpfc_hba *phba = vport->phba; 463 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 464 struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 468 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 469 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 470 uint16_t iotag;
471 int bcnt;
466 472
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 473 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 474 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 475 if (!psb)
476 break;
477
478 /*
479 * Get memory from the pci pool to map the virt space to pci
480 * bus space for an I/O. The DMA buffer includes space for the
481 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
482 * necessary to support the sg_tablesize.
483 */
484 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
485 GFP_KERNEL, &psb->dma_handle);
486 if (!psb->data) {
487 kfree(psb);
488 break;
489 }
490
491 /* Initialize virtual ptrs to dma_buf region. */
492 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
493
494 /* Allocate iotag for psb->cur_iocbq. */
495 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
496 if (iotag == 0) {
497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
498 psb->data, psb->dma_handle);
499 kfree(psb);
500 break;
501 }
502 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
503
504 psb->fcp_cmnd = psb->data;
505 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
506 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /* Initialize local short-hand pointers. */
510 bpl = psb->fcp_bpl;
511 pdma_phys_fcp_cmd = psb->dma_handle;
512 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
513 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
514 sizeof(struct fcp_rsp);
515
516 /*
517 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
518 * are sg list bdes. Initialize the first two and leave the
519 * rest for queuecommand.
520 */
521 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
522 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
523 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
524 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
526
527 /* Setup the physical region for the FCP RSP */
528 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
529 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
530 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
531 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
532 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
533
534 /*
535 * Since the IOCB for the FCP I/O is built into this
536 * lpfc_scsi_buf, initialize it with all known data now.
537 */
538 iocb = &psb->cur_iocbq.iocb;
539 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
540 if ((phba->sli_rev == 3) &&
541 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
542 /* fill in immediate fcp command BDE */
543 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
544 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
545 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
546 unsli3.fcp_ext.icd);
547 iocb->un.fcpi64.bdl.addrHigh = 0;
548 iocb->ulpBdeCount = 0;
549 iocb->ulpLe = 0;
 550 /* fill in response BDE */
551 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
552 BUFF_TYPE_BDE_64;
553 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
554 sizeof(struct fcp_rsp);
555 iocb->unsli3.fcp_ext.rbde.addrLow =
556 putPaddrLow(pdma_phys_fcp_rsp);
557 iocb->unsli3.fcp_ext.rbde.addrHigh =
558 putPaddrHigh(pdma_phys_fcp_rsp);
559 } else {
560 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
561 iocb->un.fcpi64.bdl.bdeSize =
562 (2 * sizeof(struct ulp_bde64));
563 iocb->un.fcpi64.bdl.addrLow =
564 putPaddrLow(pdma_phys_bpl);
565 iocb->un.fcpi64.bdl.addrHigh =
566 putPaddrHigh(pdma_phys_bpl);
567 iocb->ulpBdeCount = 1;
568 iocb->ulpLe = 1;
569 }
570 iocb->ulpClass = CLASS3;
571 psb->status = IOSTAT_SUCCESS;
572 /* Put it back into the SCSI buffer list */
573 lpfc_release_scsi_buf_s4(phba, psb);
470 574
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 575 }
483 576
484 /* Initialize virtual ptrs to dma_buf region. */ 577 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 578}
486 579
487 /* Allocate iotag for psb->cur_iocbq. */ 580/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 581 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 582 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 583 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 584 *
492 kfree (psb); 585 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 586 * FCP aborted xri.
587 **/
588void
589lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
590 struct sli4_wcqe_xri_aborted *axri)
591{
592 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
593 struct lpfc_scsi_buf *psb, *next_psb;
594 unsigned long iflag = 0;
595
596 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
597 list_for_each_entry_safe(psb, next_psb,
598 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
599 if (psb->cur_iocbq.sli4_xritag == xri) {
600 list_del(&psb->list);
601 psb->status = IOSTAT_SUCCESS;
602 spin_unlock_irqrestore(
603 &phba->sli4_hba.abts_scsi_buf_list_lock,
604 iflag);
605 lpfc_release_scsi_buf_s4(phba, psb);
606 return;
607 }
608 }
609 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
610 iflag);
611}
612
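lpfc_sli4_fcp_xri_aborted above scans the aborted-buffer list under a spinlock, unlinks the matching entry, and deliberately drops the lock before handing the buffer back to the release routine. The pthread-based sketch below shows only that lock/unlock ordering; the xri_entry type, the abts_list and the release_entry helper are hypothetical stand-ins, not the driver's list or locking primitives.

/* Illustrative sketch: remove a matching node under a lock, then process it
 * outside the lock. Hypothetical types; not the driver's implementation. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xri_entry {
	unsigned short xri;
	struct xri_entry *next;
};

static struct xri_entry *abts_list;
static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_entry(struct xri_entry *e)
{
	/* work done with no lock held */
	printf("releasing xri %u\n", (unsigned)e->xri);
	free(e);
}

static void xri_aborted(unsigned short xri)
{
	struct xri_entry **pp, *e;

	pthread_mutex_lock(&abts_lock);
	for (pp = &abts_list; (e = *pp) != NULL; pp = &e->next) {
		if (e->xri == xri) {
			*pp = e->next;			/* unlink while locked */
			pthread_mutex_unlock(&abts_lock);
			release_entry(e);		/* release outside the lock */
			return;
		}
	}
	pthread_mutex_unlock(&abts_lock);
}

int main(void)
{
	struct xri_entry *e = malloc(sizeof(*e));

	e->xri = 42;
	e->next = NULL;
	abts_list = e;
	xri_aborted(42);
	return 0;
}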
613/**
 614 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as a block
615 * @phba: pointer to lpfc hba data structure.
616 *
617 * This routine walks the list of scsi buffers that have been allocated and
 618 * reposts them to the HBA by using SGL block post. This is needed after a
619 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
620 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
621 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
622 *
623 * Returns: 0 = success, non-zero failure.
624 **/
625int
626lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
627{
628 struct lpfc_scsi_buf *psb;
629 int index, status, bcnt = 0, rcnt = 0, rc = 0;
630 LIST_HEAD(sblist);
631
632 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
633 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
634 if (psb) {
635 /* Remove from SCSI buffer list */
636 list_del(&psb->list);
637 /* Add it to a local SCSI buffer list */
638 list_add_tail(&psb->list, &sblist);
639 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
640 bcnt = rcnt;
641 rcnt = 0;
642 }
643 } else
644 /* A hole present in the XRI array, need to skip */
645 bcnt = rcnt;
646
647 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
648 /* End of XRI array for SCSI buffer, complete */
649 bcnt = rcnt;
650
651 /* Continue until collect up to a nembed page worth of sgls */
652 if (bcnt == 0)
653 continue;
654 /* Now, post the SCSI buffer list sgls as a block */
655 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
656 /* Reset SCSI buffer count for next round of posting */
657 bcnt = 0;
658 while (!list_empty(&sblist)) {
659 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
660 list);
661 if (status) {
662 /* Put this back on the abort scsi list */
663 psb->status = IOSTAT_LOCAL_REJECT;
664 psb->result = IOERR_ABORT_REQUESTED;
665 rc++;
666 } else
667 psb->status = IOSTAT_SUCCESS;
668 /* Put it back into the SCSI buffer list */
669 lpfc_release_scsi_buf_s4(phba, psb);
670 }
494 } 671 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 672 return rc;
673}
496 674
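lpfc_sli4_repost_scsi_sgl_list above gathers buffers into a temporary list and posts them to the hardware a block at a time, then marks every member of the block as failed or successful when the block result comes back. The sketch below reproduces that chunk-and-flush control flow over a plain array; post_block, BLOCK_SIZE and the status array are hypothetical placeholders for the SGL block post, not the driver's interfaces.

/* Illustrative sketch: process items in fixed-size blocks and record a
 * per-item status from each block's result. All names are hypothetical. */
#include <stdio.h>

#define BLOCK_SIZE 4

/* Pretend "post": fail every other block to exercise the error path. */
static int post_block(int first, int count)
{
	(void)count;
	return (first / BLOCK_SIZE) % 2 ? -1 : 0;
}

int main(void)
{
	int status[10];
	int total = 10, start = 0, i;

	while (start < total) {
		int count = total - start;

		if (count > BLOCK_SIZE)
			count = BLOCK_SIZE;
		int rc = post_block(start, count);

		/* mark every member of the block with the block's outcome */
		for (i = start; i < start + count; i++)
			status[i] = rc;
		start += count;
	}
	for (i = 0; i < total; i++)
		printf("item %d -> %s\n", i, status[i] ? "retry" : "posted");
	return 0;
}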
497 psb->fcp_cmnd = psb->data; 675/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 676 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 677 * @vport: The virtual port for which this call being executed.
500 sizeof(struct fcp_rsp); 678 * @num_to_allocate: The requested number of buffers to allocate.
679 *
680 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
681 * the scsi buffer contains all the necessary information needed to initiate
682 * a SCSI I/O.
683 *
684 * Return codes:
685 * int - number of scsi buffers that were allocated.
686 * 0 = failure, less than num_to_alloc is a partial failure.
687 **/
688static int
689lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
690{
691 struct lpfc_hba *phba = vport->phba;
692 struct lpfc_scsi_buf *psb;
693 struct sli4_sge *sgl;
694 IOCB_t *iocb;
695 dma_addr_t pdma_phys_fcp_cmd;
696 dma_addr_t pdma_phys_fcp_rsp;
697 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
698 uint16_t iotag, last_xritag = NO_XRI;
699 int status = 0, index;
700 int bcnt;
701 int non_sequential_xri = 0;
702 int rc = 0;
703 LIST_HEAD(sblist);
704
705 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
706 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
707 if (!psb)
708 break;
501 709
502 /* Initialize local short-hand pointers. */ 710 /*
503 bpl = psb->fcp_bpl; 711 * Get memory from the pci pool to map the virt space to pci bus
504 pdma_phys_fcp_cmd = psb->dma_handle; 712 * space for an I/O. The DMA buffer includes space for the
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 713 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 714 * necessary to support the sg_tablesize.
507 sizeof(struct fcp_rsp); 715 */
716 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
717 GFP_KERNEL, &psb->dma_handle);
718 if (!psb->data) {
719 kfree(psb);
720 break;
721 }
508 722
509 /* 723 /* Initialize virtual ptrs to dma_buf region. */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 724 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526 725
527 /* 726 /* Allocate iotag for psb->cur_iocbq. */
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 727 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
529 * initialize it with all known data now. 728 if (iotag == 0) {
530 */ 729 kfree(psb);
531 iocb = &psb->cur_iocbq.iocb; 730 break;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 731 }
533 if ((phba->sli_rev == 3) && 732
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 733 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
535 /* fill in immediate fcp command BDE */ 734 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 735 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
736 psb->data, psb->dma_handle);
737 kfree(psb);
738 break;
739 }
740 if (last_xritag != NO_XRI
741 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
742 non_sequential_xri = 1;
743 } else
744 list_add_tail(&psb->list, &sblist);
745 last_xritag = psb->cur_iocbq.sli4_xritag;
746
747 index = phba->sli4_hba.scsi_xri_cnt++;
748 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
749
750 psb->fcp_bpl = psb->data;
751 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
752 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
753 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
754 sizeof(struct fcp_cmnd));
755
756 /* Initialize local short-hand pointers. */
757 sgl = (struct sli4_sge *)psb->fcp_bpl;
758 pdma_phys_bpl = psb->dma_handle;
759 pdma_phys_fcp_cmd =
760 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
761 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
762 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
763
764 /*
765 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
766 * are sg list bdes. Initialize the first two and leave the
767 * rest for queuecommand.
768 */
769 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
770 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
771 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
772 bf_set(lpfc_sli4_sge_last, sgl, 0);
773 sgl->word2 = cpu_to_le32(sgl->word2);
774 sgl->word3 = cpu_to_le32(sgl->word3);
775 sgl++;
776
777 /* Setup the physical region for the FCP RSP */
778 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
779 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
780 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
781 bf_set(lpfc_sli4_sge_last, sgl, 1);
782 sgl->word2 = cpu_to_le32(sgl->word2);
783 sgl->word3 = cpu_to_le32(sgl->word3);
784
785 /*
786 * Since the IOCB for the FCP I/O is built into this
787 * lpfc_scsi_buf, initialize it with all known data now.
788 */
789 iocb = &psb->cur_iocbq.iocb;
790 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
791 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
792 /* setting the BLP size to 2 * sizeof BDE may not be correct.
 793 * We are setting the bpl to point to our sgl. An sgl's
 794 * entries are 16 bytes, a bpl entry is 12 bytes.
795 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 796 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 797 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 798 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
 543 /* fill in response BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 799 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 800 iocb->ulpLe = 1;
801 iocb->ulpClass = CLASS3;
802 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
803 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
804 else
805 pdma_phys_bpl1 = 0;
806 psb->dma_phys_bpl = pdma_phys_bpl;
807 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
808 if (non_sequential_xri) {
809 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
810 pdma_phys_bpl1,
811 psb->cur_iocbq.sli4_xritag);
812 if (status) {
813 /* Put this back on the abort scsi list */
814 psb->status = IOSTAT_LOCAL_REJECT;
815 psb->result = IOERR_ABORT_REQUESTED;
816 rc++;
817 } else
818 psb->status = IOSTAT_SUCCESS;
819 /* Put it back into the SCSI buffer list */
820 lpfc_release_scsi_buf_s4(phba, psb);
821 break;
822 }
823 }
824 if (bcnt) {
825 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
826 /* Reset SCSI buffer count for next round of posting */
827 while (!list_empty(&sblist)) {
828 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
829 list);
830 if (status) {
831 /* Put this back on the abort scsi list */
832 psb->status = IOSTAT_LOCAL_REJECT;
833 psb->result = IOERR_ABORT_REQUESTED;
834 rc++;
835 } else
836 psb->status = IOSTAT_SUCCESS;
837 /* Put it back into the SCSI buffer list */
838 lpfc_release_scsi_buf_s4(phba, psb);
839 }
558 } 840 }
559 iocb->ulpClass = CLASS3;
560 841
561 return psb; 842 return bcnt + non_sequential_xri - rc;
562} 843}
563 844
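In lpfc_new_scsi_buf_s4 above the sgl occupies the front of the DMA buffer while the fcp_cmnd and fcp_rsp are carved out of its tail, so the command and response offsets are computed backwards from cfg_sg_dma_buf_size. That arithmetic is easy to misread, so here is a small standalone sketch of the same layout calculation; the sizes and macro names below are invented for illustration and are not the driver's real values.

/* Illustrative sketch: compute tail-of-buffer offsets for the command and
 * response regions, leaving the front of the buffer for the sg list.
 * Sizes are arbitrary example values, not the driver's. */
#include <stdio.h>
#include <stddef.h>

#define SG_DMA_BUF_SIZE	1024	/* hypothetical cfg_sg_dma_buf_size */
#define CMND_SIZE	32	/* stand-in for sizeof(struct fcp_cmnd) */
#define RSP_SIZE	96	/* stand-in for sizeof(struct fcp_rsp) */

int main(void)
{
	size_t bpl_off = 0;					/* sgl at the front */
	size_t cmnd_off = SG_DMA_BUF_SIZE - (CMND_SIZE + RSP_SIZE);
	size_t rsp_off = cmnd_off + CMND_SIZE;

	printf("sgl  at offset %zu\n", bpl_off);
	printf("cmnd at offset %zu\n", cmnd_off);
	printf("rsp  at offset %zu (ends at %d)\n", rsp_off, SG_DMA_BUF_SIZE);
	return 0;
}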
564/** 845/**
 565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 846 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
566 * @phba: The Hba for which this call is being executed. 847 * @vport: The virtual port for which this call being executed.
848 * @num_to_allocate: The requested number of buffers to allocate.
849 *
850 * This routine wraps the actual SCSI buffer allocator function pointer from
851 * the lpfc_hba struct.
852 *
853 * Return codes:
854 * int - number of scsi buffers that were allocated.
855 * 0 = failure, less than num_to_alloc is a partial failure.
856 **/
857static inline int
858lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
859{
860 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
861}
862
863/**
864 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
865 * @phba: The HBA for which this call is being executed.
567 * 866 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 867 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 868 * and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 890}
592 891
593/** 892/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 893 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 894 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 895 * @psb: The scsi buffer which is being released.
597 * 896 *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 898 * lpfc_scsi_buf_list list.
600 **/ 899 **/
601static void 900static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 901lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 902{
604 unsigned long iflag = 0; 903 unsigned long iflag = 0;
605 904
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 909}
611 910
612/** 911/**
 613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 912 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
913 * @phba: The Hba for which this call is being executed.
914 * @psb: The scsi buffer which is being released.
915 *
916 * This routine releases @psb scsi buffer by adding it to tail of @phba
917 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
918 * and cannot be reused for at least RA_TOV amount of time if it was
919 * aborted.
920 **/
921static void
922lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923{
924 unsigned long iflag = 0;
925
926 if (psb->status == IOSTAT_LOCAL_REJECT
927 && psb->result == IOERR_ABORT_REQUESTED) {
928 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
929 iflag);
930 psb->pCmd = NULL;
931 list_add_tail(&psb->list,
932 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
933 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
934 iflag);
935 } else {
936
937 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
938 psb->pCmd = NULL;
939 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
940 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
941 }
942}
943
944/**
 945 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
946 * @phba: The Hba for which this call is being executed.
947 * @psb: The scsi buffer which is being released.
948 *
949 * This routine releases @psb scsi buffer by adding it to tail of @phba
950 * lpfc_scsi_buf_list list.
951 **/
952static void
953lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
954{
955
956 phba->lpfc_release_scsi_buf(phba, psb);
957}
958
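lpfc_release_scsi_buf_s4 above parks a buffer on the aborted list when the I/O finished as a local-reject/abort and only returns it straight to the free list otherwise, because an SLI-4 XRI cannot be reused immediately after an abort. A compact sketch of that two-list routing follows; the buf type, the array-free linked lists and the status values are hypothetical, and locking is omitted to keep the shape visible.

/* Illustrative sketch: route a released buffer to one of two lists based on
 * its completion status. Hypothetical types; locking omitted for brevity. */
#include <stdio.h>

enum buf_status { BUF_OK, BUF_ABORTED };

struct buf {
	int id;
	enum buf_status status;
	struct buf *next;
};

static struct buf *free_list;	/* immediately reusable buffers */
static struct buf *abts_list;	/* buffers that must age out first */

static void release_buf(struct buf *b)
{
	struct buf **head = (b->status == BUF_ABORTED) ? &abts_list : &free_list;

	b->next = *head;
	*head = b;
}

int main(void)
{
	struct buf a = { 1, BUF_OK, NULL };
	struct buf b = { 2, BUF_ABORTED, NULL };

	release_buf(&a);
	release_buf(&b);
	printf("free list head: %d, abort list head: %d\n",
	       free_list->id, abts_list->id);
	return 0;
}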
959/**
960 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 961 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 962 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 963 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 964 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and format the 965 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
619 * bdea. This routine also initializes all IOCB fields which are dependent on 966 * through sg elements and format the bdea. This routine also initializes all
620 * scsi command request buffer. 967 * IOCB fields which are dependent on scsi command request buffer.
621 * 968 *
622 * Return codes: 969 * Return codes:
623 * 1 - Error 970 * 1 - Error
624 * 0 - Success 971 * 0 - Success
625 **/ 972 **/
626static int 973static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 974lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 975{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 976 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 977 struct scatterlist *sgel = NULL;
@@ -827,8 +1174,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
827 * @reftag: out: ref tag (reference tag) 1174 * @reftag: out: ref tag (reference tag)
828 * 1175 *
829 * Description: 1176 * Description:
830 * Extract DIF paramters from the command if possible. Otherwise, 1177 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default paratmers. 1178 * use default parameters.
832 * 1179 *
833 **/ 1180 **/
834static inline void 1181static inline void
@@ -1312,10 +1659,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1659 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1660 uint64_t failing_sector = 0;
1314 1661
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1662 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1663 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1664 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1665 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1666
1320 spin_lock(&_dump_buf_lock); 1667 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1668 if (!_dump_buf_done) {
@@ -1412,6 +1759,133 @@ out:
1412} 1759}
1413 1760
1414/** 1761/**
1762 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1763 * @phba: The Hba for which this call is being executed.
1764 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1765 *
1766 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1767 * field of @lpfc_cmd for device with SLI-4 interface spec.
1768 *
1769 * Return codes:
1770 * 1 - Error
1771 * 0 - Success
1772 **/
1773static int
1774lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1775{
1776 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1777 struct scatterlist *sgel = NULL;
1778 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1779 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1780 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1781 dma_addr_t physaddr;
1782 uint32_t num_bde = 0;
1783 uint32_t dma_len;
1784 uint32_t dma_offset = 0;
1785 int nseg;
1786
1787 /*
1788 * There are three possibilities here - use scatter-gather segment, use
1789 * the single mapping, or neither. Start the lpfc command prep by
1790 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1791 * data bde entry.
1792 */
1793 if (scsi_sg_count(scsi_cmnd)) {
1794 /*
1795 * The driver stores the segment count returned from pci_map_sg
 1796 * because this is a count of dma-mappings used to map the use_sg
1797 * pages. They are not guaranteed to be the same for those
1798 * architectures that implement an IOMMU.
1799 */
1800
1801 nseg = scsi_dma_map(scsi_cmnd);
1802 if (unlikely(!nseg))
1803 return 1;
1804 sgl += 1;
1805 /* clear the last flag in the fcp_rsp map entry */
1806 sgl->word2 = le32_to_cpu(sgl->word2);
1807 bf_set(lpfc_sli4_sge_last, sgl, 0);
1808 sgl->word2 = cpu_to_le32(sgl->word2);
1809 sgl += 1;
1810
1811 lpfc_cmd->seg_cnt = nseg;
1812 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1813 printk(KERN_ERR "%s: Too many sg segments from "
1814 "dma_map_sg. Config %d, seg_cnt %d\n",
1815 __func__, phba->cfg_sg_seg_cnt,
1816 lpfc_cmd->seg_cnt);
1817 scsi_dma_unmap(scsi_cmnd);
1818 return 1;
1819 }
1820
1821 /*
1822 * The driver established a maximum scatter-gather segment count
1823 * during probe that limits the number of sg elements in any
1824 * single scsi command. Just run through the seg_cnt and format
1825 * the sge's.
1826 * When using SLI-3 the driver will try to fit all the BDEs into
1827 * the IOCB. If it can't then the BDEs get added to a BPL as it
1828 * does for SLI-2 mode.
1829 */
1830 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1831 physaddr = sg_dma_address(sgel);
1832 dma_len = sg_dma_len(sgel);
1833 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1834 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1835 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1836 if ((num_bde + 1) == nseg)
1837 bf_set(lpfc_sli4_sge_last, sgl, 1);
1838 else
1839 bf_set(lpfc_sli4_sge_last, sgl, 0);
1840 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1841 sgl->word2 = cpu_to_le32(sgl->word2);
1842 sgl->word3 = cpu_to_le32(sgl->word3);
1843 dma_offset += dma_len;
1844 sgl++;
1845 }
1846 } else {
1847 sgl += 1;
1848 /* clear the last flag in the fcp_rsp map entry */
1849 sgl->word2 = le32_to_cpu(sgl->word2);
1850 bf_set(lpfc_sli4_sge_last, sgl, 1);
1851 sgl->word2 = cpu_to_le32(sgl->word2);
1852 }
1853
1854 /*
1855 * Finish initializing those IOCB fields that are dependent on the
1856 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1857 * explicitly reinitialized.
 1858 * All iocb memory resources are reused.
1859 */
1860 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1861
1862 /*
1863 * Due to difference in data length between DIF/non-DIF paths,
1864 * we need to set word 4 of IOCB here
1865 */
1866 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1867 return 0;
1868}
1869
1870/**
1871 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1872 * @phba: The Hba for which this call is being executed.
1873 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1874 *
1875 * This routine wraps the actual DMA mapping function pointer from the
1876 * lpfc_hba struct.
1877 *
1878 * Return codes:
1879 * 1 - Error
1880 * 0 - Success
1881 **/
1882static inline int
1883lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1884{
1885 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1886}
1887
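The SLI-4 DMA-prep path above walks the mapped scatter-gather segments and builds one sge per segment, setting the "last" bit only on the final entry and carrying a running dma_offset. The loop below reproduces that pattern over a plain array; struct sge and the segment data are invented for the example and stand in for the driver's sli4_sge layout and scsi_for_each_sg iteration.

/* Illustrative sketch: format a list of sg entries, flagging only the last
 * one and accumulating the running offset. Names are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct sge {
	uint64_t addr;
	uint32_t len;
	uint32_t offset;
	int last;
};

int main(void)
{
	/* pretend dma-mapped segments: (address, length) pairs */
	uint64_t addrs[] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[]  = { 512, 1024, 256 };
	int nseg = 3, i;
	uint32_t dma_offset = 0;
	struct sge sgl[3];

	for (i = 0; i < nseg; i++) {
		sgl[i].addr = addrs[i];
		sgl[i].len = lens[i];
		sgl[i].offset = dma_offset;
		sgl[i].last = (i + 1 == nseg);	/* only the final entry */
		dma_offset += lens[i];
	}
	for (i = 0; i < nseg; i++)
		printf("sge %d: addr 0x%llx len %u off %u last %d\n", i,
		       (unsigned long long)sgl[i].addr, (unsigned)sgl[i].len,
		       (unsigned)sgl[i].offset, sgl[i].last);
	return 0;
}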
1888/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1889 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1890 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1891 * @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1978}
1505 1979
1506/** 1980/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
1508 * @phba: The Hba for which this call is being executed. 1982 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 1983 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 1984 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 1985 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 1986 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 1987 **/
1514static void 1988static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 1990{
1517 /* 1991 /*
1518 * There are only two special cases to consider. (1) the scsi command 1992 * There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1529} 2003}
1530 2004
1531/** 2005/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
2026 * This routine does DMA un-mapping of scatter gather list of scsi command
2027 * field of @lpfc_cmd for device with SLI-4 interface spec.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
1532 * lpfc_handler_fcp_err - FCP response handler 2036 * lpfc_handler_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed. 2037 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2180 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2181 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2182 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2183 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2184 *
1681 * This routine assigns scsi command result by looking into response IOCB 2185 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2186 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2461}
1958 2462
1959/** 2463/**
 1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit 2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
1961 * @vport: The virtual port for which this call is being executed. 2465 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to send. 2466 * @lpfc_cmd: The scsi command which needs to send.
1963 * @pnode: Pointer to lpfc_nodelist. 2467 * @pnode: Pointer to lpfc_nodelist.
1964 * 2468 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2469 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2470 * to transfer for device with SLI3 interface spec.
1967 **/ 2471 **/
1968static void 2472static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode) 2474 struct lpfc_nodelist *pnode)
1971{ 2475{
1972 struct lpfc_hba *phba = vport->phba; 2476 struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2517 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2518 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2519 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2520 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2521 iocb_cmd->un.fcpi.fcpi_parm = 0;
2522 iocb_cmd->ulpPU = 0;
2523 } else
2524 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2525 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2526 phba->fc4OutputRequests++;
2020 } else { 2527 } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2558}
2052 2559
2053/** 2560/**
 2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit 2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2601 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2603 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2604 * @task_mgmt_cmd: SCSI task management command.
2059 * 2605 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2606 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2607 * for device with SLI-3 interface spec.
2061 * 2608 *
2062 * Return codes: 2609 * Return codes:
2063 * 0 - Error 2610 * 0 - Error
2064 * 1 - Success 2611 * 1 - Success
2065 **/ 2612 **/
2066static int 2613static int
2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd, 2615 struct lpfc_scsi_buf *lpfc_cmd,
2069 unsigned int lun, 2616 unsigned int lun,
2070 uint8_t task_mgmt_cmd) 2617 uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2114} 2661}
2115 2662
2116/** 2663/**
 2664 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693
2694/**
 2695 * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2717 task_mgmt_cmd);
2718}
2719
2720/**
 2721 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2722 * @phba: The hba struct for which this call is being executed.
2723 * @dev_grp: The HBA PCI-Device group number.
2724 *
2725 * This routine sets up the SCSI interface API function jump table in @phba
2726 * struct.
2727 * Returns: 0 - success, -ENODEV - failure.
2728 **/
2729int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{
2732
2733 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break;
2743 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break;
2752 default:
2753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2754 "1418 Invalid HBA PCI-device group: 0x%x\n",
2755 dev_grp);
2756 return -ENODEV;
2757 break;
2758 }
2759 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2760 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2761 return 0;
2762}
2763
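lpfc_scsi_api_table_setup above is the SLI-3/SLI-4 split in miniature: at probe time the driver fills a set of function pointers in the hba structure according to the PCI device group, and the rest of the SCSI path calls through thin wrappers such as lpfc_scsi_prep_dma_buf without caring which generation of hardware is underneath. The sketch below shows the same jump-table idea with two toy backends; the hba struct, dev_grp values and function names here are simplified stand-ins for illustration, not the driver's real definitions.

/* Illustrative sketch: select per-hardware-generation callbacks once at
 * setup, then dispatch through a wrapper. All names are hypothetical. */
#include <stdio.h>

enum dev_grp { DEV_S3, DEV_S4 };

struct hba {
	int (*prep_dma_buf)(struct hba *hba);
};

static int prep_dma_buf_s3(struct hba *hba) { (void)hba; printf("SLI-3 prep\n"); return 0; }
static int prep_dma_buf_s4(struct hba *hba) { (void)hba; printf("SLI-4 prep\n"); return 0; }

static int api_table_setup(struct hba *hba, enum dev_grp grp)
{
	switch (grp) {
	case DEV_S3:
		hba->prep_dma_buf = prep_dma_buf_s3;
		break;
	case DEV_S4:
		hba->prep_dma_buf = prep_dma_buf_s4;
		break;
	default:
		return -1;	/* unknown device group */
	}
	return 0;
}

/* callers use the wrapper and never test the device group again */
static int prep_dma_buf(struct hba *hba)
{
	return hba->prep_dma_buf(hba);
}

int main(void)
{
	struct hba hba;

	api_table_setup(&hba, DEV_S4);
	return prep_dma_buf(&hba);
}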
2764/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2765 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2766 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2767 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba, 2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2182 &phba->sli.ring[phba->sli.fcp_ring], 2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) { 2831 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) { 2832 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2952 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2953 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2954 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2955 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2956 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2957 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2378,15 +3024,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 3024 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3025 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 3026 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 3027 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 3028 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 3029 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 3030 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3031 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 3032 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 3033 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 3034 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 3035 blk_rq_sectors(cmnd->request),
2390 cmnd); 3036 cmnd);
2391 3037
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3038 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +3052,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 3052 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3053 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 3054 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 3055 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 3056 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 3057 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 3058 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3059 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 3060 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 3061 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 3062 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 3063 blk_rq_sectors(cmnd->request), cmnd);
2418 else 3064 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3065 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 3066 "9042 dbg: parser not implemented\n");
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3073 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 3074
2429 atomic_inc(&ndlp->cmd_pending); 3075 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 3076 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3077 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 3078 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 3079 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 3136 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3137 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 3138 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 3139 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 3140 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 3141 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3176 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3177 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3178 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3179 if (phba->sli_rev == LPFC_SLI_REV4)
3180 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3181 else
3182 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3183
2536 icmd->ulpLe = 1; 3184 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3185 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3190
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3191 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3192 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3193 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3194 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3195 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3196 ret = FAILED;
2548 goto out; 3197 goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2668 "0703 Issue target reset to TGT %d LUN %d " 3317 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba, 3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3321 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3322 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3473{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3474 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3475 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3476 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3477 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3478 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3479 int num_allocated = 0;
2833 3480
2834 if (!rport || fc_remote_port_chkready(rport)) 3481 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3482 return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3510 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3511 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3512 }
2866 3513 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3514 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3516 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3517 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3518 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3519 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3520 }
2881 return 0; 3521 return 0;
2882} 3522}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa2..65dfc8bd5b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba..ff04daf18f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
70 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
71 * @q: The Work Queue to operate on.
72 * @wqe: The work Queue Entry to put on the Work queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
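Both routines above manage a single-producer circular queue: the full test ((host_index + 1) % entry_count) == hba_index deliberately keeps one slot unused so a full queue never looks the same as an empty one, and the release side simply walks hba_index forward to the index the hardware reports. A minimal software-only sketch of that invariant, with hypothetical names and no doorbell register:

#include <stdio.h>

#define ENTRY_COUNT 4

struct ring {
        unsigned int host_index;        /* next slot the host will write */
        unsigned int hba_index;         /* next slot the consumer will read */
        int entries[ENTRY_COUNT];
};

/* Returns 0 on success, -1 if the ring is full (one slot is kept free). */
static int ring_put(struct ring *q, int val)
{
        if (((q->host_index + 1) % ENTRY_COUNT) == q->hba_index)
                return -1;              /* full */
        q->entries[q->host_index] = val;
        q->host_index = (q->host_index + 1) % ENTRY_COUNT;
        return 0;
}

/* Consumer advances hba_index up to 'index'; returns entries released. */
static unsigned int ring_release(struct ring *q, unsigned int index)
{
        unsigned int released = 0;

        while (q->hba_index != index) {
                q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
                released++;
        }
        return released;
}

int main(void)
{
        struct ring q = { .host_index = 0, .hba_index = 0 };
        int i;

        for (i = 0; i < ENTRY_COUNT; i++)
                if (ring_put(&q, i) < 0)
                        printf("put %d rejected: ring full at %d entries\n",
                               i, ENTRY_COUNT - 1);
        printf("released %u entries\n", ring_release(&q, q.host_index));
        return 0;
}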
136
137/**
138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
139 * @q: The Mailbox Queue to operate on.
140 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
141 *
142 * This routine will copy the contents of @mqe to the next available entry on
143 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
144 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
196 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
202 * processed, but not popped back to the HBA then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
227 * by clearing the valid bit for each completion queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
272 * processed, but not popped back to the HBA then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
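Event and completion queues work the other way around: instead of a producer index, each entry carries a valid bit that the hardware sets and the host clears once it has finished with the entry, reporting the count back through the doorbell. The sketch below models that handshake in plain C with invented names; the real routines also guard against catching up with un-released entries and assemble the doorbell word with bf_set(), both of which are omitted here.

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_COUNT 8

struct cqe {
        bool valid;     /* set by the producer, cleared on release */
        int data;
};

struct cq {
        struct cqe entries[ENTRY_COUNT];
        unsigned int hba_index;         /* next entry the consumer inspects */
        unsigned int host_index;        /* first entry not yet released */
};

/* Producer side: post one entry (stands in for the hardware). */
static void cq_post(struct cq *q, unsigned int slot, int data)
{
        q->entries[slot].data = data;
        q->entries[slot].valid = true;
}

/* Consumer: return the next valid entry, or NULL when caught up. */
static struct cqe *cq_get(struct cq *q)
{
        struct cqe *cqe = &q->entries[q->hba_index];

        if (!cqe->valid)
                return NULL;
        q->hba_index = (q->hba_index + 1) % ENTRY_COUNT;
        return cqe;
}

/* Release: clear the valid bit on everything consumed and report the count
 * (the driver would write this count to the doorbell register). */
static unsigned int cq_release(struct cq *q)
{
        unsigned int released = 0;

        while (q->host_index != q->hba_index) {
                q->entries[q->host_index].valid = false;
                q->host_index = (q->host_index + 1) % ENTRY_COUNT;
                released++;
        }
        return released;
}

int main(void)
{
        struct cq q = { .hba_index = 0, .host_index = 0 };
        struct cqe *cqe;

        cq_post(&q, 0, 100);
        cq_post(&q, 1, 101);
        while ((cqe = cq_get(&q)))
                printf("consumed %d\n", cqe->data);
        printf("released %u entries to the producer\n", cq_release(&q));
        return 0;
}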
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
336 * @hq: The Header Receive Queue to operate on.
337 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
338 *
339 * This routine will copy the contents of @hrqe to the next available entry on
340 * the @q. This function will then ring the Receive Queue Doorbell to signal the
341 * HBA to start processing the Receive Queue Entry. This function returns the
342 * index that the rqe was copied to if successful. If no entries are available
343 * on @q then this function will return -ENOMEM.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @q: The Header Receive Queue to operate on.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
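Note that lpfc_sli4_rq_put only rings the receive-queue doorbell once every LPFC_RQ_POST_BATCH postings, amortising one MMIO write over a batch of buffers, while the header and data queues are kept in lockstep. A reduced sketch of just the batching idea, with a hypothetical batch size and a printf standing in for the doorbell write:

#include <stdio.h>

#define POST_BATCH 4    /* hypothetical, stands in for LPFC_RQ_POST_BATCH */

/* Post one buffer; ring the (simulated) doorbell every POST_BATCH posts. */
static void rq_post(unsigned int *host_index, unsigned int entry_count)
{
        *host_index = (*host_index + 1) % entry_count;
        if (!(*host_index % POST_BATCH))
                printf("doorbell: %d new buffers posted\n", POST_BATCH);
}

int main(void)
{
        unsigned int host_index = 0;
        int i;

        /* Ten postings ring the doorbell only twice (after 4 and 8). */
        for (i = 0; i < 10; i++)
                rq_post(&host_index, 64);
        return 0;
}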
399
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
458 * This function clears the sglq pointer from the array of active
459 * sglq's. The xritag that is passed in is used to index into the
460 * array. Before the xritag can be used it needs to be adjusted
461 * by subtracting the xribase.
462 *
463 * Returns sglq pointer = success, NULL = Failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
483 * This function returns the sglq pointer from the array of active
484 * sglq's. The xritag that is passed in is used to index into the
485 * array. Before the xritag can be used it needs to be adjusted
486 * by subtracting the xribase.
487 *
488 * Returns sglq pointer = success, NULL = Failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
506 * This function is called with hbalock held. This function
507 * Gets a new driver sglq object from the sglq list. If the
508 * list is not empty then it is successful, it returns pointer to the newly
509 * allocated sglq object else it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
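All three sglq helpers above index the active-sglq array with the XRI minus the port's configured xri_base, turning a sparse hardware tag space into a dense array index; a single unsigned bounds check covers both too-large and (via wrap-around) too-small tags. A tiny sketch of that adjustment, using made-up base and limit values:

#include <stdio.h>

#define XRI_BASE 0x100  /* hypothetical first XRI the port owns */
#define MAX_XRI  16     /* hypothetical number of XRIs configured */

static void *active[MAX_XRI + 1];       /* dense array indexed by adjusted XRI */

/* Map a hardware XRI tag to its slot, or NULL if it is out of range. */
static void **xri_slot(unsigned int xritag)
{
        unsigned int adj_xri = xritag - XRI_BASE;

        if (adj_xri > MAX_XRI)
                return NULL;
        return &active[adj_xri];
}

int main(void)
{
        int dummy;
        void **slot = xri_slot(0x105);

        if (slot) {
                *slot = &dummy;                 /* mark XRI 0x105 active */
                printf("XRI 0x105 -> index %ld\n", (long)(slot - active));
        }
        if (!xri_slot(0x05))    /* below the base wraps to a huge value */
                printf("XRI 0x05 rejected\n");
        return 0;
}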
522
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
553 * The sglq structure that holds the xritag and phys and virtual
554 * mappings for the scatter gather list is retrieved from the
555 * active array of sglq. The get of the sglq pointer also clears
556 * the entry in the array. If the status of the IO indicates that
557 * this IO was aborted then the sglq entry is put on the
558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
559 * IO has good status or fails for any other reason then the sglq
560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
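The release path above (and the SLI-3 variant that follows) recycles an iocb by zeroing only the tail of the structure: offsetof() picks the first volatile member, and one memset clears everything from there to the end while the iotag and list linkage in front of it survive. A small standalone sketch of that idiom with invented field names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct iocb_like {
        /* Fields before 'payload' are preserved across reuse. */
        int iotag;
        void *node;
        /* Everything from here on is per-use state and gets cleared. */
        int payload;
        int status;
};

static void recycle(struct iocb_like *obj)
{
        size_t start_clean = offsetof(struct iocb_like, payload);

        /* Zero only the volatile tail; iotag and node keep their values. */
        memset((char *)obj + start_clean, 0, sizeof(*obj) - start_clean);
}

int main(void)
{
        struct iocb_like obj = { .iotag = 42, .node = &obj,
                                 .payload = 7, .status = -1 };

        recycle(&obj);
        printf("iotag=%d payload=%d status=%d\n",
               obj.iotag, obj.payload, obj.status);     /* 42 0 0 */
        return 0;
}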
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
635
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
781 /* Return all HBQ buffer that are in-fly */ 1259 /* Return all HBQ buffer that are in-fly */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully posts the buffer, else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
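lpfc_sli_hbqbuf_get pulls a generic lpfc_dmabuf off the list and then uses container_of() to step back to the hbq_dmabuf that embeds it. The sketch below shows the same recovery on a pair of made-up structures; container_of is written out by hand here rather than taken from a kernel header.

#include <stddef.h>
#include <stdio.h>

/* Hand-rolled equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dmabuf {                 /* generic, list-visible part */
        unsigned int phys;
};

struct hbq_dmabuf_like {        /* full object that embeds a dmabuf */
        int tag;
        struct dmabuf dbuf;
};

int main(void)
{
        struct hbq_dmabuf_like buf = { .tag = 3, .dbuf = { .phys = 0x1000 } };
        struct dmabuf *d = &buf.dbuf;   /* what a list walk would hand back */

        /* Recover the enclosing object from the embedded member. */
        struct hbq_dmabuf_like *h =
                container_of(d, struct hbq_dmabuf_like, dbuf);

        printf("tag=%d phys=0x%x\n", h->tag, h->dbuf.phys);
        return 0;
}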
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1233 /* Unknown mailbox command compl */ 1797
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1320 1924
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
2719 /* Now, determine whether the list is completed for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
2853
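The routine above accumulates response iocbs on pring->iocb_continueq until one arrives with the LE (list end) bit set, and only then inspects the first entry of the chain to pick a completion path and release the whole chain. The stand-alone C sketch below (not part of the patch) shows that accumulate-then-dispatch shape; resp_entry, ring_ctx and handle_chain() are invented names for illustration and are not lpfc symbols.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical response entry; 'last' plays the role of the LE bit. */
struct resp_entry {
	int type;
	int last;
	struct resp_entry *next;
};

/* Per-ring context holding the partially assembled chain. */
struct ring_ctx {
	struct resp_entry *head;
	struct resp_entry *tail;
};

static void handle_chain(struct resp_entry *head)
{
	/* Dispatch on the first entry's type, then free the whole chain. */
	printf("completing chain of type %d\n", head->type);
	while (head) {
		struct resp_entry *next = head->next;
		free(head);
		head = next;
	}
}

/* Returns the entry while the chain is still growing, NULL once an
 * LE-marked entry caused the chain to be handed off - the same contract
 * the comment block above describes. */
static struct resp_entry *accumulate(struct ring_ctx *r, struct resp_entry *e)
{
	e->next = NULL;
	if (r->tail)
		r->tail->next = e;
	else
		r->head = e;
	r->tail = e;

	if (!e->last)
		return e;

	handle_chain(r->head);
	r->head = NULL;
	r->tail = NULL;
	return NULL;
}

int main(void)
{
	struct ring_ctx ring = { NULL, NULL };
	struct resp_entry *a = calloc(1, sizeof(*a));
	struct resp_entry *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	b->last = 1;
	accumulate(&ring, a);	/* chained, nothing dispatched yet */
	accumulate(&ring, b);	/* LE set: chain dispatched and freed */
	return 0;
}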
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
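From this point on the driver splits each top-level SLI entry point into an _s3 and an _s4 variant and reaches the right one through a per-adapter function pointer (the API jump table the comments refer to). The short stand-alone C sketch below (not part of the patch) illustrates that dispatch shape; struct hba, setup_api() and the other names here are invented for the example and are not the actual lpfc symbols, whose jump table is populated elsewhere in the driver.

#include <stdio.h>

/* Hypothetical per-adapter jump table: one slot per split entry point. */
struct hba {
	int sli_rev;	/* 3 or 4 */
	void (*handle_slow_ring_event)(struct hba *h, int ring, unsigned mask);
};

static void handle_slow_ring_event_s3(struct hba *h, int ring, unsigned mask)
{
	(void)h;
	printf("SLI3 path: ring %d mask 0x%x\n", ring, mask);
}

static void handle_slow_ring_event_s4(struct hba *h, int ring, unsigned mask)
{
	(void)h;
	printf("SLI4 path: ring %d mask 0x%x\n", ring, mask);
}

/* Chosen once at init time so the hot path never branches on sli_rev. */
static void setup_api(struct hba *h)
{
	h->handle_slow_ring_event = (h->sli_rev == 4)
		? handle_slow_ring_event_s4
		: handle_slow_ring_event_s3;
}

/* Thin wrapper, analogous to the exported wrapper in the hunk above. */
static void handle_slow_ring_event(struct hba *h, int ring, unsigned mask)
{
	h->handle_slow_ring_event(h, ring, mask);
}

int main(void)
{
	struct hba h = { .sli_rev = 4 };

	setup_api(&h);
	handle_slow_ring_event(&h, 0, 0x10);
	return 0;
}

The payoff of this layout is that the exported entry points keep a stable signature while the SLI3 and SLI4 implementations diverge freely underneath them.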
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function will
 2878 * remove each response iocb from the response ring and call the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 /* 2969 /*
2310 * If the port response put pointer has not been updated, sync 2970 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 } 2998 }
2339 2999
2340 spin_unlock_irqrestore(&phba->hbalock, iflag); 3000 spin_unlock_irqrestore(&phba->hbalock, iflag);
2341 return rc; 3001 return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function will remove each
3013 * response iocb from the response worker queue and calls the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342} 3032}
2343 3033
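Unlike the SLI3 path, the _s4 handler drains a driver-internal list: it holds the lock only long enough to detach one element, then processes it with the lock dropped, mirroring how the hunk above calls lpfc_sli_sp_handle_rspiocb. A user-space approximation of that drain loop follows (not part of the patch), with a pthread mutex standing in for the hba spinlock; work_item, queue and process() are made-up names, and the empty check here is done under the lock rather than before it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;
	struct work_item *next;
};

struct queue {
	pthread_mutex_t lock;	/* stands in for the hbalock spinlock */
	struct work_item *head;
};

static void process(struct work_item *w)
{
	/* Called with the lock dropped, so the handler may block or
	 * re-acquire the lock itself. */
	printf("processing item %d\n", w->id);
	free(w);
}

static void drain(struct queue *q)
{
	for (;;) {
		struct work_item *w;

		pthread_mutex_lock(&q->lock);
		w = q->head;			/* detach head under the lock */
		if (w)
			q->head = w->next;
		pthread_mutex_unlock(&q->lock);

		if (!w)				/* queue empty: done */
			break;
		process(w);			/* heavy work outside the lock */
	}
}

int main(void)
{
	struct queue q = { .head = NULL };

	pthread_mutex_init(&q.lock, NULL);
	for (int i = 3; i > 0; i--) {
		struct work_item *w = malloc(sizeof(*w));
		if (!w)
			return 1;
		w->id = i;
		w->next = q.head;
		q.head = w;
	}
	drain(&q);
	return 0;
}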
2344/** 3034/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420} 3110}
2421 3111
2422/** 3112/**
2423 * lpfc_sli_brdready - Check for host status bits 3113 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 * @phba: Pointer to HBA context object. 3114 * @phba: Pointer to HBA context object.
2425 * @mask: Bit mask to be checked. 3115 * @mask: Bit mask to be checked.
2426 * 3116 *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 * function returns 1 when HBA fail to restart otherwise returns 3122 * function returns 1 when HBA fail to restart otherwise returns
2433 * zero. 3123 * zero.
2434 **/ 3124 **/
2435int 3125static int
2436lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437{ 3127{
2438 uint32_t status; 3128 uint32_t status;
2439 int i = 0; 3129 int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 return retval; 3167 return retval;
2478} 3168}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to check if HBA is
3176 * ready. This function will wait in a loop for the HBA to be ready
3177 * If the HBA is not ready , the function will will reset the HBA PCI
3178 * function again. The function returns 1 when HBA fail to be ready
3179 * otherwise returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
 3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
 3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3212 * from the API jump table function pointer from the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
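lpfc_sli_brdready_s4 tries the post-status check once; if that fails it restarts the board and probes a second time before declaring the HBA in error, and the wrapper then simply forwards to whichever variant the jump table selected. The small sketch below (illustrative only, not the real register accesses) isolates that probe / recover-once / re-probe flow; probe(), recover() and the 1-on-failure return convention are stand-ins.

#include <stdio.h>

/* Hypothetical hardware probe: returns 0 when the device reports ready. */
static int probe(void)
{
	static int attempts;

	return attempts++ == 0;		/* pretend the first probe fails */
}

/* Hypothetical recovery action standing in for the board restart. */
static void recover(void)
{
	printf("restarting device\n");
}

/* Returns 1 if the device never became ready, 0 otherwise. */
static int device_ready(void)
{
	int status = probe();

	if (status) {			/* first probe failed: recover once */
		recover();
		status = probe();
	}
	return status ? 1 : 0;
}

int main(void)
{
	printf("ready check: %s\n", device_ready() ? "failed" : "ok");
	return 0;
}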
2480#define BARRIER_TEST_PATTERN (0xdeadbeef) 3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482/** 3222/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 mdelay(1); 3272 mdelay(1);
2533 3273
2534 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 3275 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 phba->pport->stopped) 3276 phba->pport->stopped)
2537 goto restore_hc; 3277 goto restore_hc;
2538 else 3278 else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 return 1; 3353 return 1;
2614 } 3354 }
2615 3355
2616 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3356 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 mempool_free(pmb, phba->mbox_mem_pool); 3360 mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 } 3378 }
2637 spin_lock_irq(&phba->hbalock); 3379 spin_lock_irq(&phba->hbalock);
2638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 phba->link_flag &= ~LS_IGNORE_ERATT; 3382 phba->link_flag &= ~LS_IGNORE_ERATT;
2640 spin_unlock_irq(&phba->hbalock); 3383 spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 lpfc_hba_down_post(phba); 3385 lpfc_hba_down_post(phba);
2644 phba->link_state = LPFC_HBA_ERROR; 3386 phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647} 3389}
2648 3390
2649/** 3391/**
2650 * lpfc_sli_brdreset - Reset the HBA 3392 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 * @phba: Pointer to HBA context object. 3393 * @phba: Pointer to HBA context object.
2652 * 3394 *
2653 * This function resets the HBA by writing HC_INITFF to the control 3395 * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 (cfg_value & 3425 (cfg_value &
2684 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3426 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 3428 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 /* Now toggle INITFF bit in the Host Control Register */ 3430 /* Now toggle INITFF bit in the Host Control Register */
2688 writel(HC_INITFF, phba->HCregaddr); 3431 writel(HC_INITFF, phba->HCregaddr);
2689 mdelay(1); 3432 mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710} 3453}
2711 3454
2712/** 3455/**
2713 * lpfc_sli_brdrestart - Restart the HBA 3456 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. This function disables PCI layer parity
 3460 * checking while it resets the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function returns 0 always.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
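Both lpfc_sli_brdreset and lpfc_sli4_brdreset mask parity and SERR reporting in the PCI command register before the destructive part of the reset, so transient bus errors raised while the ASIC goes down are not escalated. The sketch below (not part of the patch) shows just that read-modify-write on the command word; the pci_cfg_read16/pci_cfg_write16 helpers and the fake register are invented for the example (the driver itself uses pci_read_config_word/pci_write_config_word), and restoring the saved value is shown for completeness even though the driver does that in a later step.

#include <stdint.h>
#include <stdio.h>

#define CMD_PARITY 0x0040u		/* PCI_COMMAND_PARITY */
#define CMD_SERR   0x0100u		/* PCI_COMMAND_SERR */

/* Fake config space for the sketch; a real driver goes through the bus. */
static uint16_t fake_command_reg = 0x0147;

static uint16_t pci_cfg_read16(void)		{ return fake_command_reg; }
static void     pci_cfg_write16(uint16_t v)	{ fake_command_reg = v; }

int main(void)
{
	uint16_t saved = pci_cfg_read16();

	/* Mask parity/SERR for the duration of the reset. */
	pci_cfg_write16(saved & (uint16_t)~(CMD_PARITY | CMD_SERR));
	printf("during reset: 0x%04x\n", pci_cfg_read16());

	/* ... perform the reset here ... */

	/* Restore the caller's original command register value. */
	pci_cfg_write16(saved);
	printf("after reset:  0x%04x\n", pci_cfg_read16());
	return 0;
}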
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 * @phba: Pointer to HBA context object. 3516 * @phba: Pointer to HBA context object.
2715 * 3517 *
2716 * This function is called in the SLI initialization code path to 3518 * This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 * The function does not guarantee completion of MBX_RESTART mailbox 3524 * The function does not guarantee completion of MBX_RESTART mailbox
2723 * command before the return of this function. 3525 * command before the return of this function.
2724 **/ 3526 **/
2725int 3527static int
2726lpfc_sli_brdrestart(struct lpfc_hba *phba) 3528lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727{ 3529{
2728 MAILBOX_t *mb; 3530 MAILBOX_t *mb;
2729 struct lpfc_sli *psli; 3531 struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 lpfc_sli_brdreset(phba); 3564 lpfc_sli_brdreset(phba);
2763 phba->pport->stopped = 0; 3565 phba->pport->stopped = 0;
2764 phba->link_state = LPFC_INIT_START; 3566 phba->link_state = LPFC_INIT_START;
2765 3567 phba->hba_flag = 0;
2766 spin_unlock_irq(&phba->hbalock); 3568 spin_unlock_irq(&phba->hbalock);
2767 3569
2768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3570 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777} 3579}
2778 3580
2779/** 3581/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
3622 * API jump table function pointer from the lpfc_hba struct.
3623**/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3631 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 * @phba: Pointer to HBA context object. 3632 * @phba: Pointer to HBA context object.
2782 * 3633 *
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 if (!pmb) 3791 if (!pmb)
2941 return -ENOMEM; 3792 return -ENOMEM;
2942 3793
2943 pmbox = &pmb->mb; 3794 pmbox = &pmb->u.mb;
2944 3795
2945 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3796 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 phba->link_state = LPFC_INIT_MBX_CMDS; 3797 phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984} 3835}
2985 3836
2986/** 3837/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function will return zero if successful
3844 * else it will return negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 * lpfc_sli_config_port - Issue config port mailbox command 3858 * lpfc_sli_config_port - Issue config port mailbox command
2988 * @phba: Pointer to HBA context object. 3859 * @phba: Pointer to HBA context object.
2989 * @sli_mode: sli mode - 2/3 3860 * @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 "0442 Adapter failed to init, mbxCmd x%x " 3919 "0442 Adapter failed to init, mbxCmd x%x "
3049 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3920 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 3921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 spin_lock_irq(&phba->hbalock); 3922 spin_lock_irq(&phba->hbalock);
3052 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 3923 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 spin_unlock_irq(&phba->hbalock); 3924 spin_unlock_irq(&phba->hbalock);
3054 rc = -ENXIO; 3925 rc = -ENXIO;
3055 } else 3926 } else {
3927 /* Allow asynchronous mailbox command to go through */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930 spin_unlock_irq(&phba->hbalock);
3056 done = 1; 3931 done = 1;
3932 }
3057 } 3933 }
3058 if (!done) { 3934 if (!done) {
3059 rc = -EINVAL; 3935 rc = -EINVAL;
3060 goto do_prep_failed; 3936 goto do_prep_failed;
3061 } 3937 }
3062 if (pmb->mb.un.varCfgPort.sli_mode == 3) { 3938 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063 if (!pmb->mb.un.varCfgPort.cMA) { 3939 if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 rc = -ENXIO; 3940 rc = -ENXIO;
3065 goto do_prep_failed; 3941 goto do_prep_failed;
3066 } 3942 }
3067 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { 3943 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3944 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; 3945 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947 phba->max_vpi : phba->max_vports;
3948
3070 } else 3949 } else
3071 phba->max_vpi = 0; 3950 phba->max_vpi = 0;
3072 if (pmb->mb.un.varCfgPort.gerbm) 3951 if (pmb->u.mb.un.varCfgPort.gdss)
3952 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953 if (pmb->u.mb.un.varCfgPort.gerbm)
3073 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3954 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074 if (pmb->mb.un.varCfgPort.gcrp) 3955 if (pmb->u.mb.un.varCfgPort.gcrp)
3075 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3956 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076 if (pmb->mb.un.varCfgPort.ginb) { 3957 if (pmb->u.mb.un.varCfgPort.ginb) {
3077 phba->sli3_options |= LPFC_SLI3_INB_ENABLED; 3958 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; 3959 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 phba->port_gp = phba->mbox->us.s3_inb_pgp.port; 3960 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 } 3970 }
3090 3971
3091 if (phba->cfg_enable_bg) { 3972 if (phba->cfg_enable_bg) {
3092 if (pmb->mb.un.varCfgPort.gbg) 3973 if (pmb->u.mb.un.varCfgPort.gbg)
3093 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 3974 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 else 3975 else
3095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 if (rc) 4065 if (rc)
3185 goto lpfc_sli_hba_setup_error; 4066 goto lpfc_sli_hba_setup_error;
3186 } 4067 }
3187 4068 spin_lock_irq(&phba->hbalock);
3188 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4069 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 rc = lpfc_config_port_post(phba); 4072 rc = lpfc_config_port_post(phba);
3191 if (rc) 4073 if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
3200 return rc; 4082 return rc;
3201} 4083}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
 4089 * This function issues a dump mailbox command to read config region
 4090 * 23, parses the records in the region and populates the driver
 4091 * data structure.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
4143 return -EIO;
4144
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4147 kfree(mp);
4148 return 0;
4149}
4150
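A detail worth noting in lpfc_sli4_read_fcoe_params is that the length word reported back by the dump command is checked against DMP_FCOEPARAM_RGN_SIZE before anything is parsed, so the parser never walks past the region that was actually DMA'd. The fragment below (not part of the patch) isolates that bounds check; REGION_MAX, read_region() and region_parse() are invented names for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REGION_MAX 1024u		/* stand-in for DMP_FCOEPARAM_RGN_SIZE */

/* Parse only data_len bytes; the caller has already bounded data_len. */
static void region_parse(const uint8_t *buf, uint32_t data_len)
{
	(void)buf;
	printf("parsing %u bytes of config records\n", data_len);
}

static int read_region(const uint8_t *dma_buf, uint32_t reported_len)
{
	/* Reject device-reported lengths larger than the buffer we posted. */
	if (reported_len > REGION_MAX)
		return -1;		/* treat as an I/O error, like -EIO */

	region_parse(dma_buf, reported_len);
	return 0;
}

int main(void)
{
	uint8_t buf[REGION_MAX];

	memset(buf, 0, sizeof(buf));
	printf("ok: %d\n", read_region(buf, 512));
	printf("too big: %d\n", read_region(buf, 4096));
	return 0;
}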
4151/**
4152 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4153 * @phba: pointer to lpfc hba data structure.
4154 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4155 * @vpd: pointer to the memory to hold resulting port vpd data.
4156 * @vpd_size: On input, the number of bytes allocated to @vpd.
4157 * On output, the number of data bytes in @vpd.
4158 *
4159 * This routine executes a READ_REV SLI4 mailbox command. In
4160 * addition, this routine gets the port vpd data.
4161 *
4162 * Return codes
 4163 * 0 - successful
 4164 * ENOMEM - could not allocate memory.
4165 **/
4166static int
4167lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4168 uint8_t *vpd, uint32_t *vpd_size)
4169{
4170 int rc = 0;
4171 uint32_t dma_size;
4172 struct lpfc_dmabuf *dmabuf;
4173 struct lpfc_mqe *mqe;
4174
4175 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4176 if (!dmabuf)
4177 return -ENOMEM;
4178
4179 /*
4180 * Get a DMA buffer for the vpd data resulting from the READ_REV
4181 * mailbox command.
4182 */
4183 dma_size = *vpd_size;
4184 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4185 dma_size,
4186 &dmabuf->phys,
4187 GFP_KERNEL);
4188 if (!dmabuf->virt) {
4189 kfree(dmabuf);
4190 return -ENOMEM;
4191 }
4192 memset(dmabuf->virt, 0, dma_size);
4193
4194 /*
4195 * The SLI4 implementation of READ_REV conflicts at word1,
4196 * bits 31:16 and SLI4 adds vpd functionality not present
4197 * in SLI3. This code corrects the conflicts.
4198 */
4199 lpfc_read_rev(phba, mboxq);
4200 mqe = &mboxq->u.mqe;
4201 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4202 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4203 mqe->un.read_rev.word1 &= 0x0000FFFF;
4204 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4205 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4206
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc) {
4209 dma_free_coherent(&phba->pcidev->dev, dma_size,
4210 dmabuf->virt, dmabuf->phys);
4211 return -EIO;
4212 }
4213
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /*
4236 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than
4238 * case and update the caller's size.
4239 */
4240 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4241 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4242
4243 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4244 dma_free_coherent(&phba->pcidev->dev, dma_size,
4245 dmabuf->virt, dmabuf->phys);
4246 kfree(dmabuf);
4247 return 0;
4248}
4249
4250/**
4251 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4252 * @phba: pointer to lpfc hba data structure.
4253 *
4254 * This routine is called to explicitly arm the SLI4 device's completion and
4255 * event queues
4256 **/
4257static void
4258lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4259{
4260 uint8_t fcp_eqidx;
4261
4262 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4263 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4264 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4265 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4266 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4267 LPFC_QUEUE_REARM);
4268 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4270 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4271 LPFC_QUEUE_REARM);
4272}
4273
4274/**
 4275 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4276 * @phba: Pointer to HBA context object.
4277 *
 4278 * This function is the main SLI4 device initialization PCI function. This
 4279 * function is called by the HBA initialization code, HBA reset code and
4280 * HBA error attention handler code. Caller is not required to hold any
4281 * locks.
4282 **/
4283int
4284lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4285{
4286 int rc;
4287 LPFC_MBOXQ_t *mboxq;
4288 struct lpfc_mqe *mqe;
4289 uint8_t *vpd;
4290 uint32_t vpd_size;
4291 uint32_t ftr_rsp = 0;
4292 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4293 struct lpfc_vport *vport = phba->pport;
4294 struct lpfc_dmabuf *mp;
4295
4296 /* Perform a PCI function reset to start from clean */
4297 rc = lpfc_pci_function_reset(phba);
4298 if (unlikely(rc))
4299 return -ENODEV;
4300
 4301	/* Check the HBA Host Status Register for readiness */
4302 rc = lpfc_sli4_post_status_check(phba);
4303 if (unlikely(rc))
4304 return -ENODEV;
4305 else {
4306 spin_lock_irq(&phba->hbalock);
4307 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4308 spin_unlock_irq(&phba->hbalock);
4309 }
4310
4311 /*
4312 * Allocate a single mailbox container for initializing the
4313 * port.
4314 */
4315 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4316 if (!mboxq)
4317 return -ENOMEM;
4318
4319 /*
4320 * Continue initialization with default values even if driver failed
4321 * to read FCoE param config regions
4322 */
4323 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4325 "2570 Failed to read FCoE parameters \n");
4326
4327 /* Issue READ_REV to collect vpd and FW information. */
4328 vpd_size = PAGE_SIZE;
4329 vpd = kzalloc(vpd_size, GFP_KERNEL);
4330 if (!vpd) {
4331 rc = -ENOMEM;
4332 goto out_free_mbox;
4333 }
4334
4335 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4336 if (unlikely(rc))
4337 goto out_free_vpd;
4338
4339 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO;
4349 goto out_free_vpd;
4350 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /*
4356 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure
4358 * is not fatal as the driver will use generic values.
4359 */
4360 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4361 if (unlikely(!rc)) {
4362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4363 "0377 Error %d parsing vpd. "
4364 "Using defaults.\n", rc);
4365 rc = 0;
4366 }
4367
4368 /* By now, we should determine the SLI revision, hard code for now */
4369 phba->sli_rev = LPFC_SLI_REV4;
4370
4371 /*
4372 * Discover the port's supported feature set and match it against the
4373 * hosts requests.
4374 */
4375 lpfc_request_features(phba, mboxq);
4376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4377 if (unlikely(rc)) {
4378 rc = -EIO;
4379 goto out_free_vpd;
4380 }
4381
4382 /*
4383 * The port must support FCP initiator mode as this is the
4384 * only mode running in the host.
4385 */
4386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4388 "0378 No support for fcpi mode.\n");
4389 ftr_rsp++;
4390 }
4391
4392 /*
4393 * If the port cannot support the host's requested features
4394 * then turn off the global config parameters to disable the
4395 * feature in the driver. This is not a fatal error.
4396 */
4397 if ((phba->cfg_enable_bg) &&
4398 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4399 ftr_rsp++;
4400
4401 if (phba->max_vpi && phba->cfg_enable_npiv &&
4402 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4403 ftr_rsp++;
4404
4405 if (ftr_rsp) {
4406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4407 "0379 Feature Mismatch Data: x%08x %08x "
4408 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4409 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4410 phba->cfg_enable_npiv, phba->max_vpi);
4411 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4412 phba->cfg_enable_bg = 0;
4413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4414 phba->cfg_enable_npiv = 0;
4415 }
4416
4417 /* These SLI3 features are assumed in SLI4 */
4418 spin_lock_irq(&phba->hbalock);
4419 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4420 spin_unlock_irq(&phba->hbalock);
4421
4422 /* Read the port's service parameters. */
4423 lpfc_read_sparam(phba, mboxq, vport->vpi);
4424 mboxq->vport = vport;
4425 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4426 mp = (struct lpfc_dmabuf *) mboxq->context1;
4427 if (rc == MBX_SUCCESS) {
4428 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4429 rc = 0;
4430 }
4431
4432 /*
4433 * This memory was allocated by the lpfc_read_sparam routine. Release
4434 * it to the mbuf pool.
4435 */
4436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4437 kfree(mp);
4438 mboxq->context1 = NULL;
4439 if (unlikely(rc)) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4441 "0382 READ_SPARAM command failed "
4442 "status %d, mbxStatus x%x\n",
4443 rc, bf_get(lpfc_mqe_status, mqe));
4444 phba->link_state = LPFC_HBA_ERROR;
4445 rc = -EIO;
4446 goto out_free_vpd;
4447 }
4448
4449 if (phba->cfg_soft_wwnn)
4450 u64_to_wwn(phba->cfg_soft_wwnn,
4451 vport->fc_sparam.nodeName.u.wwn);
4452 if (phba->cfg_soft_wwpn)
4453 u64_to_wwn(phba->cfg_soft_wwpn,
4454 vport->fc_sparam.portName.u.wwn);
4455 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4456 sizeof(struct lpfc_name));
4457 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4458 sizeof(struct lpfc_name));
4459
4460 /* Update the fc_host data structures with new wwn. */
4461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4463
4464 /* Register SGL pool to the device using non-embedded mailbox command */
4465 rc = lpfc_sli4_post_sgl_list(phba);
4466 if (unlikely(rc)) {
4467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4468 "0582 Error %d during sgl post operation", rc);
4469 rc = -ENODEV;
4470 goto out_free_vpd;
4471 }
4472
4473 /* Register SCSI SGL pool to the device */
4474 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4475 if (unlikely(rc)) {
4476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
 4477			"0383 Error %d during scsi sgl post operation",
4478 rc);
4479 /* Some Scsi buffers were moved to the abort scsi list */
4480 /* A pci function reset will repost them */
4481 rc = -ENODEV;
4482 goto out_free_vpd;
4483 }
4484
4485 /* Post the rpi header region to the device. */
4486 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4487 if (unlikely(rc)) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4489 "0393 Error %d during rpi post operation\n",
4490 rc);
4491 rc = -ENODEV;
4492 goto out_free_vpd;
4493 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496
4497 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba);
4499 if (unlikely(rc)) {
4500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4501 "0381 Error %d during queue setup.\n ", rc);
4502 goto out_stop_timers;
4503 }
4504
4505 /* Arm the CQs and then EQs on device */
4506 lpfc_sli4_arm_cqeq_intr(phba);
4507
4508 /* Indicate device interrupt mode */
4509 phba->sli4_hba.intr_enable = 1;
4510
4511 /* Allow asynchronous mailbox command to go through */
4512 spin_lock_irq(&phba->hbalock);
4513 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4514 spin_unlock_irq(&phba->hbalock);
4515
4516 /* Post receive buffers to the device */
4517 lpfc_sli4_rb_setup(phba);
4518
4519 /* Start the ELS watchdog timer */
4520 /*
4521 * The driver for SLI4 is not yet ready to process timeouts
4522 * or interrupts. Once it is, the comment bars can be removed.
4523 */
4524 /* mod_timer(&vport->els_tmofunc,
4525 * jiffies + HZ * (phba->fc_ratov*2)); */
4526
4527 /* Start heart beat timer */
4528 mod_timer(&phba->hb_tmofunc,
4529 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4530 phba->hb_outstanding = 0;
4531 phba->last_completion_time = jiffies;
4532
4533 /* Start error attention (ERATT) polling timer */
4534 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4535
4536 /*
4537 * The port is ready, set the host's link state to LINK_DOWN
4538 * in preparation for link interrupts.
4539 */
4540 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4541 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4542 lpfc_set_loopback_flag(phba);
4543 /* Change driver state to LPFC_LINK_DOWN right before init link */
4544 spin_lock_irq(&phba->hbalock);
4545 phba->link_state = LPFC_LINK_DOWN;
4546 spin_unlock_irq(&phba->hbalock);
4547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4548 if (unlikely(rc != MBX_NOT_FINISHED)) {
4549 kfree(vpd);
4550 return 0;
4551 } else
4552 rc = -EIO;
4553
4554 /* Unset all the queues set up in this routine when error out */
4555 if (rc)
4556 lpfc_sli4_queue_unset(phba);
4557
4558out_stop_timers:
4559 if (rc)
4560 lpfc_stop_hba_timers(phba);
4561out_free_vpd:
4562 kfree(vpd);
4563out_free_mbox:
4564 mempool_free(mboxq, phba->mbox_mem_pool);
4565 return rc;
4566}
3203 4567
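lpfc_sli4_hba_setup acquires its resources in a fixed order (mailbox, vpd buffer, queues, timers) and unwinds them in reverse through the out_stop_timers / out_free_vpd / out_free_mbox labels, so every failure path releases exactly what had been set up by that point. The compact sketch below (not part of the patch) shows that goto-based cleanup pattern in isolation; the step functions and resource names are placeholders, not lpfc structures.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical setup steps; each returns 0 on success. */
static int step_queues(void)	{ return 0; }
static int step_link(void)	{ return -1; }	/* force a late failure */

static int hba_setup(void)
{
	int rc;
	void *mbox = NULL;
	void *vpd = NULL;

	mbox = malloc(64);
	if (!mbox)
		return -1;

	vpd = malloc(256);
	if (!vpd) {
		rc = -1;
		goto out_free_mbox;
	}

	rc = step_queues();
	if (rc)
		goto out_free_vpd;

	rc = step_link();
	if (rc)
		goto out_teardown_queues;	/* undo the queue setup first */

	free(vpd);
	free(mbox);
	return 0;

out_teardown_queues:
	puts("unsetting queues");
out_free_vpd:
	free(vpd);
out_free_mbox:
	free(mbox);
	return rc;
}

int main(void)
{
	printf("setup rc = %d\n", hba_setup());
	return 0;
}

The single-exit style keeps each error branch to one line and makes it easy to audit that later failures undo earlier steps, which is exactly why the function above funnels every error through the same labels.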
3204/** 4568/**
3205 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4569 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
3244lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4608lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245{ 4609{
3246 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4610 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247 MAILBOX_t *mb = &pmbox->mb; 4611 MAILBOX_t *mb = &pmbox->u.mb;
3248 struct lpfc_sli *psli = &phba->sli; 4612 struct lpfc_sli *psli = &phba->sli;
3249 struct lpfc_sli_ring *pring; 4613 struct lpfc_sli_ring *pring;
3250 4614
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 spin_unlock_irq(&phba->pport->work_port_lock); 4645 spin_unlock_irq(&phba->pport->work_port_lock);
3282 spin_lock_irq(&phba->hbalock); 4646 spin_lock_irq(&phba->hbalock);
3283 phba->link_state = LPFC_LINK_UNKNOWN; 4647 phba->link_state = LPFC_LINK_UNKNOWN;
3284 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 4648 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 spin_unlock_irq(&phba->hbalock); 4649 spin_unlock_irq(&phba->hbalock);
3286 4650
3287 pring = &psli->ring[psli->fcp_ring]; 4651 pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4653
3290 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 "0345 Resetting board due to mailbox timeout\n"); 4655 "0345 Resetting board due to mailbox timeout\n");
3292 /* 4656
3293 * lpfc_offline calls lpfc_sli_hba_down which will clean up 4657 /* Reset the HBA device */
3294 * on oustanding mailbox commands. 4658 lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307} 4659}
3308 4660
3309/** 4661/**
3310 * lpfc_sli_issue_mbox - Issue a mailbox command to firmware 4662 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 * @phba: Pointer to HBA context object. 4663 * @phba: Pointer to HBA context object.
3312 * @pmbox: Pointer to mailbox object. 4664 * @pmbox: Pointer to mailbox object.
3313 * @flag: Flag indicating how the mailbox need to be processed. 4665 * @flag: Flag indicating how the mailbox need to be processed.
3314 * 4666 *
3315 * This function is called by discovery code and HBA management code 4667 * This function is called by discovery code and HBA management code
3316 * to submit a mailbox command to firmware. This function gets the 4668 * to submit a mailbox command to firmware with SLI-3 interface spec. This
3317 * hbalock to protect the data structures. 4669 * function gets the hbalock to protect the data structures.
3318 * The mailbox command can be submitted in polling mode, in which case 4670 * The mailbox command can be submitted in polling mode, in which case
3319 * this function will wait in a polling loop for the completion of the 4671 * this function will wait in a polling loop for the completion of the
3320 * mailbox. 4672 * mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 * return codes the caller owns the mailbox command after the return of 4684 * return codes the caller owns the mailbox command after the return of
3333 * the function. 4685 * the function.
3334 **/ 4686 **/
3335int 4687static int
3336lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 4688lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4689 uint32_t flag)
3337{ 4690{
3338 MAILBOX_t *mb; 4691 MAILBOX_t *mb;
3339 struct lpfc_sli *psli = &phba->sli; 4692 struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4702 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 if (!pmbox) { 4703 if (!pmbox) {
3351 /* processing mbox queue from intr_handler */ 4704 /* processing mbox queue from intr_handler */
4705 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4706 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4707 return MBX_SUCCESS;
4708 }
3352 processing_queue = 1; 4709 processing_queue = 1;
3353 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4710 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 pmbox = lpfc_mbox_get(phba); 4711 pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 lpfc_printf_log(phba, KERN_ERR, 4722 lpfc_printf_log(phba, KERN_ERR,
3366 LOG_MBOX | LOG_VPORT, 4723 LOG_MBOX | LOG_VPORT,
3367 "1806 Mbox x%x failed. No vport\n", 4724 "1806 Mbox x%x failed. No vport\n",
3368 pmbox->mb.mbxCommand); 4725 pmbox->u.mb.mbxCommand);
3369 dump_stack(); 4726 dump_stack();
3370 goto out_not_finished; 4727 goto out_not_finished;
3371 } 4728 }
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4742
3386 psli = &phba->sli; 4743 psli = &phba->sli;
3387 4744
3388 mb = &pmbox->mb; 4745 mb = &pmbox->u.mb;
3389 status = MBX_SUCCESS; 4746 status = MBX_SUCCESS;
3390 4747
3391 if (phba->link_state == LPFC_HBA_ERROR) { 4748 if (phba->link_state == LPFC_HBA_ERROR) {
3392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4750
3394 /* Mbox command <mbxCommand> cannot issue */ 4751 /* Mbox command <mbxCommand> cannot issue */
3395 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4752 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4753 "(%d):0311 Mailbox command x%x cannot "
4754 "issue Data: x%x x%x\n",
4755 pmbox->vport ? pmbox->vport->vpi : 0,
4756 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 goto out_not_finished; 4757 goto out_not_finished;
3397 } 4758 }
3398 4759
3399 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4760 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4761 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4762 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4763 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4764 "(%d):2528 Mailbox command x%x cannot "
4765 "issue Data: x%x x%x\n",
4766 pmbox->vport ? pmbox->vport->vpi : 0,
4767 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 goto out_not_finished; 4768 goto out_not_finished;
3404 } 4769 }
3405 4770
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4778 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4779
3415 /* Mbox command <mbxCommand> cannot issue */ 4780 /* Mbox command <mbxCommand> cannot issue */
3416 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4782 "(%d):2529 Mailbox command x%x "
4783 "cannot issue Data: x%x x%x\n",
4784 pmbox->vport ? pmbox->vport->vpi : 0,
4785 pmbox->u.mb.mbxCommand,
4786 psli->sli_flag, flag);
3417 goto out_not_finished; 4787 goto out_not_finished;
3418 } 4788 }
3419 4789
3420 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 4790 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4791 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 /* Mbox command <mbxCommand> cannot issue */ 4792 /* Mbox command <mbxCommand> cannot issue */
3423 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4793 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4794 "(%d):2530 Mailbox command x%x "
4795 "cannot issue Data: x%x x%x\n",
4796 pmbox->vport ? pmbox->vport->vpi : 0,
4797 pmbox->u.mb.mbxCommand,
4798 psli->sli_flag, flag);
3424 goto out_not_finished; 4799 goto out_not_finished;
3425 } 4800 }
3426 4801
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4837
3463 /* If we are not polling, we MUST be in SLI2 mode */ 4838 /* If we are not polling, we MUST be in SLI2 mode */
3464 if (flag != MBX_POLL) { 4839 if (flag != MBX_POLL) {
3465 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 4840 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 (mb->mbxCommand != MBX_KILL_BOARD)) { 4841 (mb->mbxCommand != MBX_KILL_BOARD)) {
3467 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4842 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4843 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 /* Mbox command <mbxCommand> cannot issue */ 4844 /* Mbox command <mbxCommand> cannot issue */
3470 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 4845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4846 "(%d):2531 Mailbox command x%x "
4847 "cannot issue Data: x%x x%x\n",
4848 pmbox->vport ? pmbox->vport->vpi : 0,
4849 pmbox->u.mb.mbxCommand,
4850 psli->sli_flag, flag);
3471 goto out_not_finished; 4851 goto out_not_finished;
3472 } 4852 }
3473 /* timeout active mbox command */ 4853 /* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 /* next set own bit for the adapter and copy over command word */ 4886 /* next set own bit for the adapter and copy over command word */
3507 mb->mbxOwner = OWN_CHIP; 4887 mb->mbxOwner = OWN_CHIP;
3508 4888
3509 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4889 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 /* First copy command data to host SLIM area */ 4890 /* First copy command data to host SLIM area */
3511 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4891 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 } else { 4892 } else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4909
3530 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4910 if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 /* switch over to host mailbox */ 4911 /* switch over to host mailbox */
3532 psli->sli_flag |= LPFC_SLI2_ACTIVE; 4912 psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 } 4913 }
3534 } 4914 }
3535 4915
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 writel(CA_MBATT, phba->CAregaddr); 4932 writel(CA_MBATT, phba->CAregaddr);
3553 readl(phba->CAregaddr); /* flush */ 4933 readl(phba->CAregaddr); /* flush */
3554 4934
3555 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4935 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 /* First read mbox status word */ 4936 /* First read mbox status word */
3557 word0 = *((uint32_t *)phba->mbox); 4937 word0 = *((uint32_t *)phba->mbox);
3558 word0 = le32_to_cpu(word0); 4938 word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4971 spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 } 4972 }
3593 4973
3594 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4974 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 /* First copy command data */ 4975 /* First copy command data */
3596 word0 = *((uint32_t *)phba->mbox); 4976 word0 = *((uint32_t *)phba->mbox);
3597 word0 = le32_to_cpu(word0); 4977 word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 4984 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 && slimmb->mbxStatus) { 4985 && slimmb->mbxStatus) {
3606 psli->sli_flag &= 4986 psli->sli_flag &=
3607 ~LPFC_SLI2_ACTIVE; 4987 ~LPFC_SLI_ACTIVE;
3608 word0 = slimword0; 4988 word0 = slimword0;
3609 } 4989 }
3610 } 4990 }
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 ha_copy = readl(phba->HAregaddr); 4996 ha_copy = readl(phba->HAregaddr);
3617 } 4997 }
3618 4998
3619 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 4999 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 /* copy results back to user */ 5000 /* copy results back to user */
3621 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5001 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 } else { 5002 } else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5023
3644out_not_finished: 5024out_not_finished:
3645 if (processing_queue) { 5025 if (processing_queue) {
3646 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 5026 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 lpfc_mbox_cmpl_put(phba, pmbox); 5027 lpfc_mbox_cmpl_put(phba, pmbox);
3648 } 5028 }
3649 return MBX_NOT_FINISHED; 5029 return MBX_NOT_FINISHED;
3650} 5030}
3651 5031
3652/** 5032/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object.
5036 *
5037 * The function posts a mailbox to the port. The mailbox is expected
 5038 * to be completely filled in and ready for the port to operate on it.
5039 * This routine executes a synchronous completion operation on the
5040 * mailbox by polling for its completion.
5041 *
5042 * The caller must not be holding any locks when calling this routine.
5043 *
5044 * Returns:
5045 * MBX_SUCCESS - mailbox posted successfully
5046 * Any of the MBX error values.
5047 **/
5048static int
5049lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5050{
5051 int rc = MBX_SUCCESS;
5052 unsigned long iflag;
5053 uint32_t db_ready;
5054 uint32_t mcqe_status;
5055 uint32_t mbx_cmnd;
5056 unsigned long timeout;
5057 struct lpfc_sli *psli = &phba->sli;
5058 struct lpfc_mqe *mb = &mboxq->u.mqe;
5059 struct lpfc_bmbx_create *mbox_rgn;
5060 struct dma_address *dma_address;
5061 struct lpfc_register bmbx_reg;
5062
5063 /*
5064 * Only one mailbox can be active to the bootstrap mailbox region
5065 * at a time and there is no queueing provided.
5066 */
5067 spin_lock_irqsave(&phba->hbalock, iflag);
5068 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5069 spin_unlock_irqrestore(&phba->hbalock, iflag);
5070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5071 "(%d):2532 Mailbox command x%x (x%x) "
5072 "cannot issue Data: x%x x%x\n",
5073 mboxq->vport ? mboxq->vport->vpi : 0,
5074 mboxq->u.mb.mbxCommand,
5075 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5076 psli->sli_flag, MBX_POLL);
5077 return MBXERR_ERROR;
5078 }
5079 /* The server grabs the token and owns it until release */
5080 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5081 phba->sli.mbox_active = mboxq;
5082 spin_unlock_irqrestore(&phba->hbalock, iflag);
5083
5084 /*
5085 * Initialize the bootstrap memory region to avoid stale data areas
5086 * in the mailbox post. Then copy the caller's mailbox contents to
5087 * the bmbx mailbox region.
5088 */
5089 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5090 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5091 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5092 sizeof(struct lpfc_mqe));
5093
5094 /* Post the high mailbox dma address to the port and wait for ready. */
5095 dma_address = &phba->sli4_hba.bmbx.dma_address;
5096 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5097
5098 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5099 * 1000) + jiffies;
5100 do {
5101 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5102 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5103 if (!db_ready)
5104 msleep(2);
5105
5106 if (time_after(jiffies, timeout)) {
5107 rc = MBXERR_ERROR;
5108 goto exit;
5109 }
5110 } while (!db_ready);
5111
5112 /* Post the low mailbox dma address to the port. */
5113 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5114 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5115 * 1000) + jiffies;
5116 do {
5117 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5118 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5119 if (!db_ready)
5120 msleep(2);
5121
5122 if (time_after(jiffies, timeout)) {
5123 rc = MBXERR_ERROR;
5124 goto exit;
5125 }
5126 } while (!db_ready);
5127
5128 /*
5129 * Read the CQ to ensure the mailbox has completed.
5130 * If so, update the mailbox status so that the upper layers
5131 * can complete the request normally.
5132 */
5133 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5134 sizeof(struct lpfc_mqe));
5135 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5136 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5137 sizeof(struct lpfc_mcqe));
5138 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5139
5140 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5141 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5142 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5143 rc = MBXERR_ERROR;
5144 }
5145
5146 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5147 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5148 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5149 " x%x x%x CQ: x%x x%x x%x x%x\n",
5150 mboxq->vport ? mboxq->vport->vpi : 0,
5151 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5152 bf_get(lpfc_mqe_status, mb),
5153 mb->un.mb_words[0], mb->un.mb_words[1],
5154 mb->un.mb_words[2], mb->un.mb_words[3],
5155 mb->un.mb_words[4], mb->un.mb_words[5],
5156 mb->un.mb_words[6], mb->un.mb_words[7],
5157 mb->un.mb_words[8], mb->un.mb_words[9],
5158 mb->un.mb_words[10], mb->un.mb_words[11],
5159 mb->un.mb_words[12], mboxq->mcqe.word0,
5160 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5161 mboxq->mcqe.trailer);
5162exit:
 5163	/* We are holding the token; no lock is needed for the release */
5164 spin_lock_irqsave(&phba->hbalock, iflag);
5165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5166 phba->sli.mbox_active = NULL;
5167 spin_unlock_irqrestore(&phba->hbalock, iflag);
5168 return rc;
5169}
5170
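The bootstrap-mailbox post above follows a simple hardware handshake: write one half of the DMA address to the doorbell register, poll its ready bit until the port acknowledges or a timeout expires, then repeat for the other half and finally read back the completion status. A minimal user-space model of that poll-until-ready-or-timeout loop is sketched below; port_ready(), wait_for_doorbell() and the 2 ms sleep are invented stand-ins for the real register read and msleep(), not lpfc code.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for reading the bootstrap doorbell ready bit. */
static bool port_ready(void)
{
	static int polls;
	return ++polls >= 3;		/* pretend the port is ready on the 3rd poll */
}

/* Poll until the ready bit is set or the deadline passes (0 = ok, -1 = timeout). */
static int wait_for_doorbell(unsigned int timeout_ms)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };

	for (unsigned int waited = 0; waited <= timeout_ms; waited += 2) {
		if (port_ready())
			return 0;		/* ready bit seen, done */
		nanosleep(&delay, NULL);	/* models msleep(2) between polls */
	}
	return -1;				/* deadline passed: the MBXERR_ERROR path */
}

int main(void)
{
	/* Post high half, wait; post low half, wait -- as in the routine above. */
	if (wait_for_doorbell(100) == 0 && wait_for_doorbell(100) == 0)
		printf("mailbox posted\n");
	else
		printf("doorbell timeout\n");
	return 0;
}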
5171/**
5172 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5173 * @phba: Pointer to HBA context object.
 5174 * @mboxq: Pointer to mailbox object.
 5175 * @flag: Flag indicating how the mailbox needs to be processed.
5176 *
5177 * This function is called by discovery code and HBA management code to submit
5178 * a mailbox command to firmware with SLI-4 interface spec.
5179 *
 5180 * Return codes: the caller owns the mailbox command after the return of the
 5181 * function.
5182 **/
5183static int
5184lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5185 uint32_t flag)
5186{
5187 struct lpfc_sli *psli = &phba->sli;
5188 unsigned long iflags;
5189 int rc;
5190
5191 /* Detect polling mode and jump to a handler */
5192 if (!phba->sli4_hba.intr_enable) {
5193 if (flag == MBX_POLL)
5194 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5195 else
5196 rc = -EIO;
5197 if (rc != MBX_SUCCESS)
5198 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5199 "(%d):2541 Mailbox command x%x "
5200 "(x%x) cannot issue Data: x%x x%x\n",
5201 mboxq->vport ? mboxq->vport->vpi : 0,
5202 mboxq->u.mb.mbxCommand,
5203 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5204 psli->sli_flag, flag);
5205 return rc;
5206 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) "
5209 "cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return -EIO;
5215 }
5216
 5217	/* Now, interrupt mode asynchronous mailbox command */
5218 rc = lpfc_mbox_cmd_check(phba, mboxq);
5219 if (rc) {
5220 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5221 "(%d):2543 Mailbox command x%x (x%x) "
5222 "cannot issue Data: x%x x%x\n",
5223 mboxq->vport ? mboxq->vport->vpi : 0,
5224 mboxq->u.mb.mbxCommand,
5225 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5226 psli->sli_flag, flag);
5227 goto out_not_finished;
5228 }
5229 rc = lpfc_mbox_dev_check(phba);
5230 if (unlikely(rc)) {
5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5232 "(%d):2544 Mailbox command x%x (x%x) "
5233 "cannot issue Data: x%x x%x\n",
5234 mboxq->vport ? mboxq->vport->vpi : 0,
5235 mboxq->u.mb.mbxCommand,
5236 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5237 psli->sli_flag, flag);
5238 goto out_not_finished;
5239 }
5240
5241 /* Put the mailbox command to the driver internal FIFO */
5242 psli->slistat.mbox_busy++;
5243 spin_lock_irqsave(&phba->hbalock, iflags);
5244 lpfc_mbox_put(phba, mboxq);
5245 spin_unlock_irqrestore(&phba->hbalock, iflags);
5246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5247 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5248 "x%x (x%x) x%x x%x x%x\n",
5249 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5250 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5251 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5252 phba->pport->port_state,
5253 psli->sli_flag, MBX_NOWAIT);
5254 /* Wake up worker thread to transport mailbox command from head */
5255 lpfc_worker_wake_up(phba);
5256
5257 return MBX_BUSY;
5258
5259out_not_finished:
5260 return MBX_NOT_FINISHED;
5261}
5262
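lpfc_sli_issue_mbox_s4() above makes its dispatch decision from just two inputs: whether device interrupts are enabled and whether the caller asked for MBX_POLL. A compact way to read that logic is the decision helper sketched below; the enum values and function names are invented for illustration and do not exist in the driver.

#include <stdbool.h>
#include <stdio.h>

enum issue_mode { ISSUE_SYNC_POLL, ISSUE_ASYNC_QUEUE, ISSUE_REJECT };

/* Hypothetical reduction of the dispatch rules in lpfc_sli_issue_mbox_s4(). */
static enum issue_mode pick_issue_mode(bool intr_enabled, bool caller_wants_poll)
{
	if (!intr_enabled)
		/* No interrupts: only a synchronous, polled post can work. */
		return caller_wants_poll ? ISSUE_SYNC_POLL : ISSUE_REJECT;

	/* Interrupts on: polling is refused, commands go through the FIFO. */
	return caller_wants_poll ? ISSUE_REJECT : ISSUE_ASYNC_QUEUE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       pick_issue_mode(false, true),	/* sync bootstrap post  */
	       pick_issue_mode(false, false),	/* rejected (-EIO path) */
	       pick_issue_mode(true, true),	/* rejected             */
	       pick_issue_mode(true, false));	/* queued, MBX_BUSY     */
	return 0;
}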
5263/**
5264 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5265 * @phba: Pointer to HBA context object.
5266 *
5267 * This function is called by worker thread to send a mailbox command to
5268 * SLI4 HBA firmware.
5269 *
5270 **/
5271int
5272lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5273{
5274 struct lpfc_sli *psli = &phba->sli;
5275 LPFC_MBOXQ_t *mboxq;
5276 int rc = MBX_SUCCESS;
5277 unsigned long iflags;
5278 struct lpfc_mqe *mqe;
5279 uint32_t mbx_cmnd;
5280
5281 /* Check interrupt mode before post async mailbox command */
5282 if (unlikely(!phba->sli4_hba.intr_enable))
5283 return MBX_NOT_FINISHED;
5284
5285 /* Check for mailbox command service token */
5286 spin_lock_irqsave(&phba->hbalock, iflags);
5287 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5288 spin_unlock_irqrestore(&phba->hbalock, iflags);
5289 return MBX_NOT_FINISHED;
5290 }
5291 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5292 spin_unlock_irqrestore(&phba->hbalock, iflags);
5293 return MBX_NOT_FINISHED;
5294 }
5295 if (unlikely(phba->sli.mbox_active)) {
5296 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5298 "0384 There is pending active mailbox cmd\n");
5299 return MBX_NOT_FINISHED;
5300 }
5301 /* Take the mailbox command service token */
5302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5303
5304 /* Get the next mailbox command from head of queue */
5305 mboxq = lpfc_mbox_get(phba);
5306
 5307	/* If no more mailbox commands are waiting for post, we're done */
5308 if (!mboxq) {
5309 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5310 spin_unlock_irqrestore(&phba->hbalock, iflags);
5311 return MBX_SUCCESS;
5312 }
5313 phba->sli.mbox_active = mboxq;
5314 spin_unlock_irqrestore(&phba->hbalock, iflags);
5315
5316 /* Check device readiness for posting mailbox command */
5317 rc = lpfc_mbox_dev_check(phba);
5318 if (unlikely(rc))
5319 /* Driver clean routine will clean up pending mailbox */
5320 goto out_not_finished;
5321
5322 /* Prepare the mbox command to be posted */
5323 mqe = &mboxq->u.mqe;
5324 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5325
5326 /* Start timer for the mbox_tmo and log some mailbox post messages */
5327 mod_timer(&psli->mbox_tmo, (jiffies +
5328 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5329
5330 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5331 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5332 "x%x x%x\n",
5333 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5334 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5335 phba->pport->port_state, psli->sli_flag);
5336
5337 if (mbx_cmnd != MBX_HEARTBEAT) {
5338 if (mboxq->vport) {
5339 lpfc_debugfs_disc_trc(mboxq->vport,
5340 LPFC_DISC_TRC_MBOX_VPORT,
5341 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5342 mbx_cmnd, mqe->un.mb_words[0],
5343 mqe->un.mb_words[1]);
5344 } else {
5345 lpfc_debugfs_disc_trc(phba->pport,
5346 LPFC_DISC_TRC_MBOX,
5347 "MBOX Send: cmd:x%x mb:x%x x%x",
5348 mbx_cmnd, mqe->un.mb_words[0],
5349 mqe->un.mb_words[1]);
5350 }
5351 }
5352 psli->slistat.mbox_cmd++;
5353
5354 /* Post the mailbox command to the port */
5355 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5356 if (rc != MBX_SUCCESS) {
5357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5358 "(%d):2533 Mailbox command x%x (x%x) "
5359 "cannot issue Data: x%x x%x\n",
5360 mboxq->vport ? mboxq->vport->vpi : 0,
5361 mboxq->u.mb.mbxCommand,
5362 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5363 psli->sli_flag, MBX_NOWAIT);
5364 goto out_not_finished;
5365 }
5366
5367 return rc;
5368
5369out_not_finished:
5370 spin_lock_irqsave(&phba->hbalock, iflags);
5371 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5372 __lpfc_mbox_cmpl_put(phba, mboxq);
5373 /* Release the token */
5374 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5375 phba->sli.mbox_active = NULL;
5376 spin_unlock_irqrestore(&phba->hbalock, iflags);
5377
5378 return MBX_NOT_FINISHED;
5379}
5380
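The async path is a producer/consumer pair: lpfc_sli_issue_mbox_s4() only enqueues the command and wakes the worker, while lpfc_sli4_post_async_mbox() takes a single "mailbox active" token under the lock, dequeues one command, and posts it, so at most one command is ever outstanding. A small pthread-based model of that token-plus-FIFO discipline is sketched below; the names and structures are illustrative, not the driver's, and the completion that clears the token is done inline instead of from an interrupt.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QDEPTH 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int fifo[QDEPTH];
static int head, tail;
static bool mbox_active;		/* the single-outstanding token */

/* Producer side: queue a command; the real driver then wakes its worker. */
static bool enqueue_cmd(int cmd)
{
	bool ok = false;

	pthread_mutex_lock(&lock);
	if ((tail + 1) % QDEPTH != head) {
		fifo[tail] = cmd;
		tail = (tail + 1) % QDEPTH;
		ok = true;
	}
	pthread_mutex_unlock(&lock);
	return ok;			/* roughly the MBX_BUSY return */
}

/* Consumer side: take the token, pull one command, post it, drop the token. */
static void post_one_cmd(void)
{
	int cmd = -1;

	pthread_mutex_lock(&lock);
	if (!mbox_active && head != tail) {
		mbox_active = true;
		cmd = fifo[head];
		head = (head + 1) % QDEPTH;
	}
	pthread_mutex_unlock(&lock);

	if (cmd < 0)
		return;
	printf("posting mailbox cmd 0x%x\n", cmd);

	/* In the driver the completion handler clears the token; inline here. */
	pthread_mutex_lock(&lock);
	mbox_active = false;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	enqueue_cmd(0x05);
	enqueue_cmd(0x22);
	post_one_cmd();
	post_one_cmd();
	return 0;
}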
5381/**
5382 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5383 * @phba: Pointer to HBA context object.
5384 * @pmbox: Pointer to mailbox object.
 5385 * @flag: Flag indicating how the mailbox needs to be processed.
5386 *
5387 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
5388 * the API jump table function pointer from the lpfc_hba struct.
5389 *
 5390 * Return codes: the caller owns the mailbox command after the return of the
 5391 * function.
5392 **/
5393int
5394lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5395{
5396 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5397}
5398
5399/**
 5400 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5401 * @phba: The hba struct for which this call is being executed.
5402 * @dev_grp: The HBA PCI-Device group number.
5403 *
5404 * This routine sets up the mbox interface API function jump table in @phba
5405 * struct.
5406 * Returns: 0 - success, -ENODEV - failure.
5407 **/
5408int
5409lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5410{
5411
5412 switch (dev_grp) {
5413 case LPFC_PCI_DEV_LP:
5414 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5415 phba->lpfc_sli_handle_slow_ring_event =
5416 lpfc_sli_handle_slow_ring_event_s3;
5417 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5418 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5419 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5420 break;
5421 case LPFC_PCI_DEV_OC:
5422 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5423 phba->lpfc_sli_handle_slow_ring_event =
5424 lpfc_sli_handle_slow_ring_event_s4;
5425 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5426 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5427 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5428 break;
5429 default:
5430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431 "1420 Invalid HBA PCI-device group: 0x%x\n",
5432 dev_grp);
5433 return -ENODEV;
5434 break;
5435 }
5436 return 0;
5437}
5438
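lpfc_mbox_api_table_setup() chooses an implementation family once, at probe time, and records it as function pointers so hot paths such as lpfc_sli_issue_mbox() become a single indirect call. A stripped-down sketch of the same pattern is shown below; the struct and function names here are made up for the example and are not part of the driver.

#include <stdio.h>

struct hba {
	int (*issue_mbox)(struct hba *hba, int cmd);
};

/* Hypothetical SLI-3 and SLI-4 backends. */
static int issue_mbox_s3(struct hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-3 mailbox path, cmd 0x%x\n", cmd);
	return 0;
}

static int issue_mbox_s4(struct hba *hba, int cmd)
{
	(void)hba;
	printf("SLI-4 mailbox path, cmd 0x%x\n", cmd);
	return 0;
}

enum pci_dev_grp { DEV_LP, DEV_OC };

/* Probe-time setup: pick the backend for this device group once. */
static int api_table_setup(struct hba *hba, enum pci_dev_grp grp)
{
	switch (grp) {
	case DEV_LP:
		hba->issue_mbox = issue_mbox_s3;
		return 0;
	case DEV_OC:
		hba->issue_mbox = issue_mbox_s4;
		return 0;
	default:
		return -1;		/* -ENODEV in the driver */
	}
}

int main(void)
{
	struct hba hba;

	if (api_table_setup(&hba, DEV_OC) == 0)
		hba.issue_mbox(&hba, 0x88);	/* wrapper call is one indirection */
	return 0;
}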
5439/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5440 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5441 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5442 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5488}
3702 5489
3703/** 5490/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5491 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5492 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5493 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5494 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5495 * @flag: Flag indicating if this command can be put into txq.
3709 * 5496 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5497 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5498 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5499 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5500 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5501 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5502 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5503 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5504 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5505 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5506 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5507 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5508 * This function is called with hbalock held. The function will return success
3722 * 5509 * after it successfully submit the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5510 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5511 **/
3727static int 5512static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5513__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5514 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5515{
3731 struct lpfc_iocbq *nextiocb; 5516 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5517 IOCB_t *iocb;
5518 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5519
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5520 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5521 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5619 return IOCB_BUSY;
3834} 5620}
3835 5621
5622/**
5623 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5624 * @phba: Pointer to HBA context object.
5625 * @piocb: Pointer to command iocb.
5626 * @sglq: Pointer to the scatter gather queue object.
5627 *
5628 * This routine converts the bpl or bde that is in the IOCB
5629 * to a sgl list for the sli4 hardware. The physical address
5630 * of the bpl/bde is converted back to a virtual address.
5631 * If the IOCB contains a BPL then the list of BDE's is
5632 * converted to sli4_sge's. If the IOCB contains a single
5633 * BDE then it is converted to a single sli_sge.
 5634 * The IOCB is still in cpu endianness so the contents of
5635 * the bpl can be used without byte swapping.
5636 *
5637 * Returns valid XRI = Success, NO_XRI = Failure.
5638**/
5639static uint16_t
5640lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5641 struct lpfc_sglq *sglq)
5642{
5643 uint16_t xritag = NO_XRI;
5644 struct ulp_bde64 *bpl = NULL;
5645 struct ulp_bde64 bde;
5646 struct sli4_sge *sgl = NULL;
5647 IOCB_t *icmd;
5648 int numBdes = 0;
5649 int i = 0;
5650
5651 if (!piocbq || !sglq)
5652 return xritag;
5653
5654 sgl = (struct sli4_sge *)sglq->sgl;
5655 icmd = &piocbq->iocb;
5656 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5657 numBdes = icmd->un.genreq64.bdl.bdeSize /
5658 sizeof(struct ulp_bde64);
5659 /* The addrHigh and addrLow fields within the IOCB
5660 * have not been byteswapped yet so there is no
5661 * need to swap them back.
5662 */
5663 bpl = (struct ulp_bde64 *)
5664 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5665
5666 if (!bpl)
5667 return xritag;
5668
5669 for (i = 0; i < numBdes; i++) {
5670 /* Should already be byte swapped. */
5671 sgl->addr_hi = bpl->addrHigh;
5672 sgl->addr_lo = bpl->addrLow;
5673 /* swap the size field back to the cpu so we
5674 * can assign it to the sgl.
5675 */
5676 bde.tus.w = le32_to_cpu(bpl->tus.w);
5677 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5678 if ((i+1) == numBdes)
5679 bf_set(lpfc_sli4_sge_last, sgl, 1);
5680 else
5681 bf_set(lpfc_sli4_sge_last, sgl, 0);
5682 sgl->word2 = cpu_to_le32(sgl->word2);
5683 sgl->word3 = cpu_to_le32(sgl->word3);
5684 bpl++;
5685 sgl++;
5686 }
5687 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5688 /* The addrHigh and addrLow fields of the BDE have not
5689 * been byteswapped yet so they need to be swapped
5690 * before putting them in the sgl.
5691 */
5692 sgl->addr_hi =
5693 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5694 sgl->addr_lo =
5695 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5696 bf_set(lpfc_sli4_sge_len, sgl,
5697 icmd->un.genreq64.bdl.bdeSize);
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 sgl->word2 = cpu_to_le32(sgl->word2);
5700 sgl->word3 = cpu_to_le32(sgl->word3);
5701 }
5702 return sglq->sli4_xritag;
5703}
5704
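The conversion above walks a buffer pointer list (BPL), copies each entry's address and length into an SGE, and marks only the final entry as "last" so the hardware knows where the scatter/gather list ends. The loop shape is captured by the simplified sketch below; the structures are invented stand-ins, not the real ulp_bde64/sli4_sge layouts, and no endianness handling is modeled.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the BPL entry and the SGE it becomes. */
struct bpl_entry { uint32_t addr_hi, addr_lo, len; };
struct sge       { uint32_t addr_hi, addr_lo, len; uint8_t last; };

/* Copy every BPL entry into the SGL, flagging only the final entry. */
static void bpl_to_sgl(const struct bpl_entry *bpl, struct sge *sgl, int n)
{
	for (int i = 0; i < n; i++) {
		sgl[i].addr_hi = bpl[i].addr_hi;
		sgl[i].addr_lo = bpl[i].addr_lo;
		sgl[i].len     = bpl[i].len;
		sgl[i].last    = (i + 1 == n);	/* "last" bit only on the tail */
	}
}

int main(void)
{
	struct bpl_entry bpl[3] = {
		{ 0x0, 0x1000, 512 }, { 0x0, 0x2000, 512 }, { 0x0, 0x3000, 256 },
	};
	struct sge sgl[3];

	bpl_to_sgl(bpl, sgl, 3);
	for (int i = 0; i < 3; i++)
		printf("sge[%d]: len=%u last=%u\n", i, sgl[i].len, sgl[i].last);
	return 0;
}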
5705/**
5706 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5707 * @phba: Pointer to HBA context object.
5708 * @piocb: Pointer to command iocb.
5709 *
 5710 * This routine performs a round-robin distribution of SCSI commands across
 5711 * the SLI4 FCP WQ indexes.
5712 *
5713 * Return: index into SLI4 fast-path FCP queue index.
5714 **/
5715static uint32_t
5716lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5717{
5718 static uint32_t fcp_qidx;
5719
5720 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5721}
5722
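The distribution helper above is simply a modulo counter: each new FCP command takes the next work-queue index in turn, spreading I/O evenly across the configured queues. A hedged sketch of the same idea, with the counter passed in rather than kept as a function-local static, is shown below; the names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Round-robin selection of a work-queue index out of wq_count queues. */
static uint32_t next_wq_index(uint32_t *counter, uint32_t wq_count)
{
	return (*counter)++ % wq_count;
}

int main(void)
{
	uint32_t counter = 0;

	for (int i = 0; i < 8; i++)
		printf("command %d -> WQ %u\n", i, next_wq_index(&counter, 4));
	return 0;
}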
5723/**
5724 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5725 * @phba: Pointer to HBA context object.
5726 * @piocb: Pointer to command iocb.
5727 * @wqe: Pointer to the work queue entry.
5728 *
5729 * This routine converts the iocb command to its Work Queue Entry
5730 * equivalent. The wqe pointer should not have any fields set when
5731 * this routine is called because it will memcpy over them.
5732 * This routine does not set the CQ_ID or the WQEC bits in the
5733 * wqe.
5734 *
5735 * Returns: 0 = Success, IOCB_ERROR = Failure.
5736 **/
5737static int
5738lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5739 union lpfc_wqe *wqe)
5740{
5741 uint32_t payload_len = 0;
5742 uint8_t ct = 0;
5743 uint32_t fip;
5744 uint32_t abort_tag;
5745 uint8_t command_type = ELS_COMMAND_NON_FIP;
5746 uint8_t cmnd;
5747 uint16_t xritag;
5748 struct ulp_bde64 *bpl = NULL;
5749
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND;
5758 else {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5760 "2019 Invalid cmd 0x%x\n",
5761 iocbq->iocb.ulpCommand);
5762 return IOCB_ERROR;
5763 }
5764 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag;
5767 xritag = iocbq->sli4_xritag;
5768 wqe->words[7] = 0; /* The ct field has moved so reset */
5769 /* words0-2 bpl convert bde */
5770 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5771 bpl = (struct ulp_bde64 *)
5772 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5773 if (!bpl)
5774 return IOCB_ERROR;
5775
5776 /* Should already be byte swapped. */
5777 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5778 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5779 /* swap the size field back to the cpu so we
5780 * can assign it to the sgl.
5781 */
5782 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5783 payload_len = wqe->generic.bde.tus.f.bdeSize;
5784 } else
5785 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5786
5787 iocbq->iocb.ulpIoTag = iocbq->iotag;
5788 cmnd = iocbq->iocb.ulpCommand;
5789
5790 switch (iocbq->iocb.ulpCommand) {
5791 case CMD_ELS_REQUEST64_CR:
5792 if (!iocbq->iocb.ulpLe) {
5793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5794 "2007 Only Limited Edition cmd Format"
5795 " supported 0x%x\n",
5796 iocbq->iocb.ulpCommand);
5797 return IOCB_ERROR;
5798 }
5799 wqe->els_req.payload_len = payload_len;
 5800		/* Els_request64 has a TMO */
5801 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5802 iocbq->iocb.ulpTimeout);
5803 /* Need a VF for word 4 set the vf bit*/
5804 bf_set(els_req64_vf, &wqe->els_req, 0);
5805 /* And a VFID for word 12 */
5806 bf_set(els_req64_vfid, &wqe->els_req, 0);
5807 /*
 5808		 * Set ct field to 3, indicating that the context_tag field
5809 * contains the FCFI and remote N_Port_ID is
5810 * in word 5.
5811 */
5812
5813 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext);
5816
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5825 break;
5826 case CMD_XMIT_SEQUENCE64_CR:
5827 /* word3 iocb=io_tag32 wqe=payload_offset */
 5828		/* payload offset used for multiple outstanding
5829 * sequences on the same exchange
5830 */
5831 wqe->words[3] = 0;
5832 /* word4 relative_offset memcpy */
5833 /* word5 r_ctl/df_ctl memcpy */
5834 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5835 wqe->xmit_sequence.xmit_len = payload_len;
5836 break;
5837 case CMD_XMIT_BCAST64_CN:
5838 /* word3 iocb=iotag32 wqe=payload_len */
5839 wqe->words[3] = 0; /* no definition for this in wqe */
5840 /* word4 iocb=rsvd wqe=rsvd */
5841 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5842 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5844 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5845 break;
5846 case CMD_FCP_IWRITE64_CR:
5847 command_type = FCP_COMMAND_DATA_OUT;
5848 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5849 * confusing.
5850 * word3 is payload_len: byte offset to the sgl entry for the
5851 * fcp_command.
5852 * word4 is total xfer len, same as the IOCB->ulpParameter.
5853 * word5 is initial xfer len 0 = wait for xfer-ready
5854 */
5855
5856 /* Always wait for xfer-ready before sending data */
5857 wqe->fcp_iwrite.initial_xfer_len = 0;
5858 /* word 4 (xfer length) should have been set on the memcpy */
5859
5860 /* allow write to fall through to read */
5861 case CMD_FCP_IREAD64_CR:
5862 /* FCP_CMD is always the 1st sgl entry */
5863 wqe->fcp_iread.payload_len =
5864 payload_len + sizeof(struct fcp_rsp);
5865
5866 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5869 iocbq->iocb.ulpFCP2Rcvy);
5870 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5871 /* The XC bit and the XS bit are similar. The driver never
 5872		 * tracked whether or not the exchange was previously open.
5873 * XC = Exchange create, 0 is create. 1 is already open.
5874 * XS = link cmd: 1 do not close the exchange after command.
5875 * XS = 0 close exchange when command completes.
5876 * The only time we would not set the XC bit is when the XS bit
5877 * is set and we are sending our 2nd or greater command on
5878 * this exchange.
5879 */
5880
5881 /* ALLOW read & write to fall through to ICMD64 */
5882 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5888 break;
5889 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the
5891 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
5892 * sgl[0] = cmnd
5893 * sgl[1] = rsp.
5894 *
5895 */
5896 wqe->gen_req.command_len = payload_len;
5897 /* Word4 parameter copied in the memcpy */
5898 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5899 /* word6 context tag copied in memcpy */
5900 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5901 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5903 "2015 Invalid CT %x command 0x%x\n",
5904 ct, iocbq->iocb.ulpCommand);
5905 return IOCB_ERROR;
5906 }
5907 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5908 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910
5911 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5912 command_type = OTHER_COMMAND;
5913 break;
5914 case CMD_XMIT_ELS_RSP64_CX:
5915 /* words0-2 BDE memcpy */
5916 /* word3 iocb=iotag32 wqe=rsvd */
5917 wqe->words[3] = 0;
 5918		/* word4 iocb=did wqe=rsvd. */
5919 wqe->words[4] = 0;
 5920		/* word5 iocb=rsvd wqe=did */
5921 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5922 iocbq->iocb.un.elsreq64.remoteID);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5926
5927 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5928 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5929 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5930 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5931 iocbq->vport->vpi + phba->vpi_base);
5932 command_type = OTHER_COMMAND;
5933 break;
5934 case CMD_CLOSE_XRI_CN:
5935 case CMD_ABORT_XRI_CN:
5936 case CMD_ABORT_XRI_CX:
 5937		/* words 0-2 memcpy should be 0 reserved */
5938 /* port will send abts */
5939 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5940 /*
5941 * The link is down so the fw does not need to send abts
5942 * on the wire.
5943 */
5944 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5945 else
5946 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5947 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5948 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5949 wqe->words[5] = 0;
5950 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5951 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5952 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5953 wqe->generic.abort_tag = abort_tag;
5954 /*
5955 * The abort handler will send us CMD_ABORT_XRI_CN or
5956 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5957 */
5958 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5959 cmnd = CMD_ABORT_XRI_CX;
5960 command_type = OTHER_COMMAND;
5961 xritag = 0;
5962 break;
5963 case CMD_XRI_ABORTED_CX:
5964 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5965 /* words0-2 are all 0's no bde */
 5966		/* word3 and word4 are reserved */
5967 wqe->words[3] = 0;
5968 wqe->words[4] = 0;
 5969		/* word5 iocb=rsvd wqe=did */
5970 /* There is no remote port id in the IOCB? */
5971 /* Let this fall through and fail */
5972 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
5973 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
5974 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
5975 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
5976 default:
5977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5978 "2014 Invalid command 0x%x\n",
5979 iocbq->iocb.ulpCommand);
5980 return IOCB_ERROR;
5981 break;
5982
5983 }
5984 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
5985 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
5986 wqe->generic.abort_tag = abort_tag;
5987 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
5988 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
5989 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
5990 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
5991
5992 return 0;
5993}
5994
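Most of the WQE construction above goes through bf_set()-style helpers that pack a value into a named bit-field of a 32-bit word. The sketch below shows the general mask-and-shift operation such helpers perform; these functions are a generic illustration written for this example, not the driver's actual bf_set()/bf_get() definitions.

#include <stdint.h>
#include <stdio.h>

/* Generic illustration: place 'val' into word bits [shift, shift+width). */
static uint32_t field_set(uint32_t word, unsigned int shift, unsigned int width,
			  uint32_t val)
{
	uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

static uint32_t field_get(uint32_t word, unsigned int shift, unsigned int width)
{
	uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (word >> shift) & mask;
}

int main(void)
{
	uint32_t word = 0;

	word = field_set(word, 0, 8, 0x9a);	/* e.g. a command opcode    */
	word = field_set(word, 24, 3, 0x5);	/* e.g. a class/type field  */
	printf("word=0x%08x cmd=0x%x\n", word, field_get(word, 0, 8));
	return 0;
}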
5995/**
5996 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
5997 * @phba: Pointer to HBA context object.
5998 * @ring_number: SLI ring number to issue iocb on.
5999 * @piocb: Pointer to command iocb.
6000 * @flag: Flag indicating if this command can be put into txq.
6001 *
6002 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6003 * an iocb command to an HBA with SLI-4 interface spec.
6004 *
6005 * This function is called with hbalock held. The function will return success
6006 * after it successfully submit the iocb to firmware or after adding to the
6007 * txq.
6008 **/
6009static int
6010__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6011 struct lpfc_iocbq *piocb, uint32_t flag)
6012{
6013 struct lpfc_sglq *sglq;
6014 uint16_t xritag;
6015 union lpfc_wqe wqe;
6016 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6017 uint32_t fcp_wqidx;
6018
6019 if (piocb->sli4_xritag == NO_XRI) {
6020 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6021 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6022 sglq = NULL;
6023 else {
6024 sglq = __lpfc_sli_get_sglq(phba);
6025 if (!sglq)
6026 return IOCB_ERROR;
6027 piocb->sli4_xritag = sglq->sli4_xritag;
6028 }
6029 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6030 sglq = NULL; /* These IO's already have an XRI and
6031 * a mapped sgl.
6032 */
6033 } else {
 6034		/* This is a continuation of a command (CX), so this
6035 * sglq is on the active list
6036 */
6037 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6038 if (!sglq)
6039 return IOCB_ERROR;
6040 }
6041
6042 if (sglq) {
6043 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6044 if (xritag != sglq->sli4_xritag)
6045 return IOCB_ERROR;
6046 }
6047
6048 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6049 return IOCB_ERROR;
6050
6051 if (piocb->iocb_flag & LPFC_IO_FCP) {
6052 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6053 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6054 return IOCB_ERROR;
6055 } else {
6056 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6057 return IOCB_ERROR;
6058 }
6059 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6060
6061 return 0;
6062}
6063
6064/**
6065 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6066 *
 6067 * This routine wraps the actual lockless version for issuing IOCB function
6068 * pointer from the lpfc_hba struct.
6069 *
6070 * Return codes:
6071 * IOCB_ERROR - Error
6072 * IOCB_SUCCESS - Success
6073 * IOCB_BUSY - Busy
6074 **/
6075static inline int
6076__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6077 struct lpfc_iocbq *piocb, uint32_t flag)
6078{
6079 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6080}
6081
6082/**
 6083 * lpfc_sli_api_table_setup - Set up sli api function jump table
6084 * @phba: The hba struct for which this call is being executed.
6085 * @dev_grp: The HBA PCI-Device group number.
6086 *
6087 * This routine sets up the SLI interface API function jump table in @phba
6088 * struct.
6089 * Returns: 0 - success, -ENODEV - failure.
6090 **/
6091int
6092lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6093{
6094
6095 switch (dev_grp) {
6096 case LPFC_PCI_DEV_LP:
6097 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6098 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6099 break;
6100 case LPFC_PCI_DEV_OC:
6101 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6102 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106 "1419 Invalid HBA PCI-device group: 0x%x\n",
6107 dev_grp);
6108 return -ENODEV;
6109 break;
6110 }
6111 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6112 return 0;
6113}
3836 6114
3837/** 6115/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6116 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6126 * functions which do not hold hbalock.
3849 **/ 6127 **/
3850int 6128int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6129lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6130 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6131{
3854 unsigned long iflags; 6132 unsigned long iflags;
3855 int rc; 6133 int rc;
3856 6134
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6135 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6136 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6137 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6138
3861 return rc; 6139 return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6426}
4149 6427
4150/** 6428/**
6429 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6430 * @phba: Pointer to HBA context object.
6431 *
6432 * This routine flushes the mailbox command subsystem. It will unconditionally
6433 * flush all the mailbox commands in the three possible stages in the mailbox
6434 * command sub-system: pending mailbox command queue; the outstanding mailbox
 6435 * command; and completed mailbox command queue. It is the caller's responsibility
6436 * to make sure that the driver is in the proper state to flush the mailbox
6437 * command sub-system. Namely, the posting of mailbox commands into the
6438 * pending mailbox command queue from the various clients must be stopped;
 6439 * either the HBA is in a state that it will never work on the outstanding
6440 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6441 * mailbox command has been completed.
6442 **/
6443static void
6444lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6445{
6446 LIST_HEAD(completions);
6447 struct lpfc_sli *psli = &phba->sli;
6448 LPFC_MBOXQ_t *pmb;
6449 unsigned long iflag;
6450
6451 /* Flush all the mailbox commands in the mbox system */
6452 spin_lock_irqsave(&phba->hbalock, iflag);
6453 /* The pending mailbox command queue */
6454 list_splice_init(&phba->sli.mboxq, &completions);
6455 /* The outstanding active mailbox command */
6456 if (psli->mbox_active) {
6457 list_add_tail(&psli->mbox_active->list, &completions);
6458 psli->mbox_active = NULL;
6459 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6460 }
6461 /* The completed mailbox command queue */
6462 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6463 spin_unlock_irqrestore(&phba->hbalock, iflag);
6464
6465 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6466 while (!list_empty(&completions)) {
6467 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6468 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6469 if (pmb->mbox_cmpl)
6470 pmb->mbox_cmpl(phba, pmb);
6471 }
6472}
6473
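The flush routine above uses a common kernel idiom: splice every pending element onto a private list while holding the lock, drop the lock, and then complete each element with an error status outside the lock. A small user-space model of that splice-then-complete flush is sketched below; a singly linked list and a pthread mutex stand in for the kernel list_head and spinlock machinery.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int tag;
	int status;
	struct cmd *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *pending;		/* protected by 'lock' */

#define CMD_NOT_FINISHED (-1)

/* Flush: detach the whole pending list under the lock, complete it unlocked. */
static void flush_pending(void)
{
	struct cmd *list;

	pthread_mutex_lock(&lock);
	list = pending;			/* splice everything off */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (list) {
		struct cmd *c = list;

		list = c->next;
		c->status = CMD_NOT_FINISHED;	/* MBX_NOT_FINISHED in the driver */
		printf("completing cmd %d with status %d\n", c->tag, c->status);
		free(c);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct cmd *c = calloc(1, sizeof(*c));

		c->tag = i;
		c->next = pending;
		pending = c;
	}
	flush_pending();
	return 0;
}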
6474/**
4151 * lpfc_sli_host_down - Vport cleanup function 6475 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6476 * @vport: Pointer to virtual port object.
4153 * 6477 *
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6564 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6565 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6566 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6567 unsigned long flags = 0;
6568 int i;
6569
6570 /* Shutdown the mailbox command sub-system */
6571 lpfc_sli_mbox_sys_shutdown(phba);
4246 6572
4247 lpfc_hba_down_prep(phba); 6573 lpfc_hba_down_prep(phba);
4248 6574
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6613
4288 /* Return any active mbox cmds */ 6614 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6615 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6616
4292 spin_lock(&phba->pport->work_port_lock); 6617 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6618 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6619 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6620
4296 /* Return any pending or completed mbox cmds */ 6621 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6622}
4298 if (psli->mbox_active) { 6623
4299 list_add_tail(&psli->mbox_active->list, &completions); 6624/**
4300 psli->mbox_active = NULL; 6625 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6626 * @phba: Pointer to HBA context object.
4302 } 6627 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6628 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6629 * shutting down the SLI4 HBA FCoE function. This function is called with no
6630 * lock held and always returns 1.
6631 *
6632 * This function does the following to cleanup driver FCoE function resources:
6633 * - Free discovery resources for each virtual port
6634 * - Cleanup any pending fabric iocbs
6635 * - Iterate through the iocb txq and free each entry in the list.
6636 * - Free up any buffer posted to the HBA.
6637 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6638 * - Free mailbox commands in the mailbox queue.
6639 **/
6640int
6641lpfc_sli4_hba_down(struct lpfc_hba *phba)
6642{
6643 /* Stop the SLI4 device port */
6644 lpfc_stop_port(phba);
6645
6646 /* Tear down the queues in the HBA */
6647 lpfc_sli4_queue_unset(phba);
6648
6649 /* unregister default FCFI from the HBA */
6650 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6651
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6652 return 1;
4313} 6653}
4314 6654
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 6979 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 6980 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 6981 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 6982 if (phba->sli_rev == LPFC_SLI_REV4)
6983 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6984 else
6985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 6986 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 6987 iabt->ulpClass = icmd->ulpClass;
4645 6988
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 6998 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 6999 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7000 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7001 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7002
4660 if (retval) 7003 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7004 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7181 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7182 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7183 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7184 if (phba->sli_rev == LPFC_SLI_REV4)
7185 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7186 else
7187 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7188 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7189 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7190 abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7196
4851 /* Setup callback routine and issue the command. */ 7197 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7198 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7199 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7200 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7201 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7202 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7203 errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7278 **/
4932int 7279int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7280lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7281 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7282 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7283 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7284 uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7309 readl(phba->HCregaddr); /* flush */
4963 } 7310 }
4964 7311
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7312 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7313 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7314 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7315 timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7424}
5078 7425
5079/** 7426/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7427 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7428 * @phba: Pointer to HBA context.
5082 * 7429 *
5083 * This function is called to cleanup any pending mailbox 7430 * This function is called to shutdown the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7431 * It first marks the mailbox sub-system is in a block state to prevent
5085 * This function is called while resetting the HBA. 7432 * the asynchronous mailbox command from issued off the pending mailbox
5086 * The function is called without any lock held. The function 7433 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7434 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7435 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7436 * mailbox sub-system. Otherwise, if it is due to normal condition (such
7437 * as with offline or HBA function reset), this routine will wait for the
7438 * outstanding mailbox command to complete before invoking the mailbox
7439 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7440 **/
5091int 7441void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7442lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7443{
5094 struct lpfc_vport *vport = phba->pport; 7444 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7445 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7446 unsigned long timeout;
5097 7447
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7448 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7449 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7450 spin_unlock_irq(&phba->hbalock);
5101 7451
5102 /* 7452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7453 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7454 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7455 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7456 spin_unlock_irq(&phba->hbalock);
7457 /* Determine how long we might wait for the active mailbox
7458 * command to be gracefully completed by firmware.
7459 */
7460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7461 1000) + jiffies;
7462 while (phba->sli.mbox_active) {
7463 /* Check active mailbox complete status every 2ms */
7464 msleep(2);
7465 if (time_after(jiffies, timeout))
 7466			/* Timeout, let the mailbox flush routine
 7467			 * forcefully release the active mailbox command
7468 */
7469 break;
7470 }
7471 }
7472 lpfc_sli_mbox_sys_flush(phba);
7473}
7474
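The shutdown sequence above is a three-step order: set a block flag so no new async commands can start, give any outstanding command a bounded amount of time to complete, and only then force the flush. The skeleton below models that order in user space; the flag, timeout handling, and flush call are simplified placeholders and not the driver's implementation.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool async_blocked;		/* models LPFC_SLI_ASYNC_MBX_BLK         */
static bool cmd_active = true;		/* models phba->sli.mbox_active != NULL  */

static void flush_all(void)
{
	printf("flushing remaining commands\n");
}

/* Graceful shutdown: block new work, wait briefly for the active command,
 * then flush whatever is left.
 */
static void mbox_sys_shutdown(unsigned int timeout_ms)
{
	async_blocked = true;

	for (unsigned int waited = 0; cmd_active && waited < timeout_ms;
	     waited += 2) {
		usleep(2000);			/* 2 ms poll, like msleep(2) above */
		if (waited >= 4)
			cmd_active = false;	/* pretend the firmware completed it */
	}
	flush_all();
}

int main(void)
{
	mbox_sys_shutdown(100);
	return 0;
}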
7475/**
7476 * lpfc_sli_eratt_read - read sli-3 error attention events
7477 * @phba: Pointer to HBA context.
7478 *
7479 * This function is called to read the SLI3 device error attention registers
7480 * for possible error attention events. The caller must hold the hostlock
7481 * with spin_lock_irq().
7482 *
 7483 * This function returns 1 when there is Error Attention in the Host Attention
7484 * Register and returns 0 otherwise.
7485 **/
7486static int
7487lpfc_sli_eratt_read(struct lpfc_hba *phba)
7488{
7489 uint32_t ha_copy;
5111 7490
5112 if (ha_copy & HA_MBATT) 7491 /* Read chip Host Attention (HA) register */
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7492 ha_copy = readl(phba->HAregaddr);
5114 i = 0; 7493 if (ha_copy & HA_ERATT) {
7494 /* Read host status register to retrieve error event */
7495 lpfc_sli_read_hs(phba);
7496
7497 /* Check if there is a deferred error condition is active */
7498 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr);
7507 }
5115 7508
5116 msleep(1); 7509 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1;
5117 } 7516 }
7517 return 0;
7518}
7519
7520/**
7521 * lpfc_sli4_eratt_read - read sli-4 error attention events
7522 * @phba: Pointer to HBA context.
7523 *
7524 * This function is called to read the SLI4 device error attention registers
7525 * for possible error attention events. The caller must hold the hostlock
7526 * with spin_lock_irq().
7527 *
7528 * This fucntion returns 1 when there is Error Attention in the Host Attention
7529 * Register and returns 0 otherwise.
7530 **/
7531static int
7532lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7533{
7534 uint32_t uerr_sta_hi, uerr_sta_lo;
7535 uint32_t onlnreg0, onlnreg1;
5118 7536
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7537 /* For now, use the SLI4 device internal unrecoverable error
7538 * registers for error attention. This can be changed later.
7539 */
7540 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7541 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7542 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7543 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7544 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7545 if (uerr_sta_lo || uerr_sta_hi) {
7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7547 "1423 HBA Unrecoverable error: "
7548 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7549 "online0_reg=0x%x, online1_reg=0x%x\n",
7550 uerr_sta_lo, uerr_sta_hi,
7551 onlnreg0, onlnreg1);
 7552			/* TEMP: as the driver error recovery logic is not
7553 * fully developed, we just log the error message
7554 * and the device error attention action is now
7555 * temporarily disabled.
7556 */
7557 return 0;
7558 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1;
7567 }
7568 }
7569 return 0;
5120} 7570}
5121 7571
5122/** 7572/**
5123 * lpfc_sli_check_eratt - check error attention events 7573 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7574 * @phba: Pointer to HBA context.
5125 * 7575 *
5126 * This function is called form timer soft interrupt context to check HBA's 7576 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7577 * error attention register bit for error attention events.
5128 * 7578 *
5129 * This function returns 1 when there is Error Attention in the Host Attention	7579 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7584{
5135 uint32_t ha_copy; 7585 uint32_t ha_copy;
5136 7586
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7587 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7588 * here. The brdkill function will do this.
5143 */ 7589 */
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7607 return 0;
5162 } 7608 }
5163 7609
5164 /* Read chip Host Attention (HA) register */ 7610 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7611 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7612 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7613 return 0;
7614 }
7615
7616 switch (phba->sli_rev) {
7617 case LPFC_SLI_REV2:
7618 case LPFC_SLI_REV3:
7619 /* Read chip Host Attention (HA) register */
7620 ha_copy = lpfc_sli_eratt_read(phba);
7621 break;
7622 case LPFC_SLI_REV4:
 7623		/* Read device Unrecoverable Error (UERR) registers */
7624 ha_copy = lpfc_sli4_eratt_read(phba);
7625 break;
7626 default:
7627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7628 "0299 Invalid SLI revision (%d)\n",
7629 phba->sli_rev);
7630 ha_copy = 0;
7631 break;
5186 } 7632 }
5187 spin_unlock_irq(&phba->hbalock); 7633 spin_unlock_irq(&phba->hbalock);
7634
7635 return ha_copy;
7636}
7637
7638/**
7639 * lpfc_intr_state_check - Check device state for interrupt handling
7640 * @phba: Pointer to HBA context.
7641 *
7642 * This inline routine checks whether a device or its PCI slot is in a state
7643 * that the interrupt should be handled.
7644 *
7645 * This function returns 0 if the device or the PCI slot is in a state that
7646 * interrupt should be handled, otherwise -EIO.
7647 */
7648static inline int
7649lpfc_intr_state_check(struct lpfc_hba *phba)
7650{
7651 /* If the pci channel is offline, ignore all the interrupts */
7652 if (unlikely(pci_channel_offline(phba->pcidev)))
7653 return -EIO;
7654
7655 /* Update device level interrupt statistics */
7656 phba->sli.slistat.sli_intr++;
7657
7658 /* Ignore all interrupts during initialization. */
7659 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7660 return -EIO;
7661
5188 return 0; 7662 return 0;
5189} 7663}
5190 7664
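lpfc_intr_state_check() is the guard used at the top of the interrupt handlers: bail out when the PCI channel is offline or the link state shows initialization is still in progress, and bump the interrupt statistic otherwise. A condensed sketch of that guard-and-count pattern is below; the state values and structure are invented for the example.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum link_state { LINK_UNINIT = 0, LINK_DOWN = 1, LINK_UP = 2 };

struct hba_state {
	bool pci_offline;
	enum link_state link;
	unsigned long intr_count;
};

/* Return 0 if the interrupt should be serviced, -EIO otherwise. */
static int intr_state_check(struct hba_state *s)
{
	if (s->pci_offline)
		return -EIO;		/* channel in error recovery */

	s->intr_count++;		/* device-level statistic    */

	if (s->link < LINK_DOWN)
		return -EIO;		/* still initializing        */
	return 0;
}

int main(void)
{
	struct hba_state s = { .pci_offline = false, .link = LINK_DOWN };

	printf("handle interrupt: %s\n", intr_state_check(&s) ? "no" : "yes");
	return 0;
}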
5191/** 7665/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7666 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7667 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7668 * @dev_id: The device context pointer.
5195 * 7669 *
5196 * This function is directly called from the PCI layer as an interrupt 7670 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7671 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7672 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7673 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7674 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7675 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7676 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7677 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7678 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7679 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7680 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7681 * structures.
5208 * 7682 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7683 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7684 * returns IRQ_NONE.
5211 **/ 7685 **/
5212irqreturn_t 7686irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7687lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7688{
5215 struct lpfc_hba *phba; 7689 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7690 uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7714 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7715 */
5242 if (phba->intr_type == MSIX) { 7716 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7717 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7718 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7719 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7720 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7721 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7740 * interrupt.
5272 */ 7741 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7742 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7743 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7744 return IRQ_NONE;
5276 } 7745 }
5277 7746
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7833
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7834 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7835 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7836 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7837 mbox = phba->mbox;
5369 vport = pmb->vport; 7838 vport = pmb->vport;
5370 7839
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 7903 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 7904 "0350 rc should have"
5436 "been MBX_BUSY"); 7905 "been MBX_BUSY");
5437 goto send_current_mbox; 7906 if (rc != MBX_NOT_FINISHED)
7907 goto send_current_mbox;
5438 } 7908 }
5439 } 7909 }
5440 spin_lock_irqsave( 7910 spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
5471 } 7941 }
5472 return IRQ_HANDLED; 7942 return IRQ_HANDLED;
5473 7943
5474} /* lpfc_sp_intr_handler */ 7944} /* lpfc_sli_sp_intr_handler */
5475 7945
5476/** 7946/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 7947 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 7948 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 7949 * @dev_id: The device context pointer.
5480 * 7950 *
5481 * This function is directly called from the PCI layer as an interrupt 7951 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 7952 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 7953 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 7954 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 7955 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 7956 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 7957 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring events are handled in the 7958 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5489 * interrupt context. This function is called without any lock held. It 7959 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 7960 * It gets the hbalock to access and update SLI data structures.
5491 * 7961 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 7962 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 7963 * returns IRQ_NONE.
5494 **/ 7964 **/
5495irqreturn_t 7965irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 7966lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 7967{
5498 struct lpfc_hba *phba; 7968 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 7969 uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 7983 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 7984 */
5515 if (phba->intr_type == MSIX) { 7985 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 7986 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 7987 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 7988 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 7989 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 7990 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 7995 * any interrupt.
5531 */ 7996 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 7998 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 7999 return IRQ_NONE;
5535 } 8000 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8001 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8031 }
5567 } 8032 }
5568 return IRQ_HANDLED; 8033 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8034} /* lpfc_sli_fp_intr_handler */
5570 8035
5571/** 8036/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8037 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8038 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8039 * @dev_id: The device context pointer.
5575 * 8040 *
5576 * This function is the device-level interrupt handler called from the PCI 8041 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8042 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8043 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8044 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8045 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8046 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8047 * function is called without any lock held. It gets the hbalock to access
8048 * and update SLI data structures.
5583 * 8049 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8050 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8051 * returns IRQ_NONE.
5586 **/ 8052 **/
5587irqreturn_t 8053irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8054lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8055{
5590 struct lpfc_hba *phba; 8056 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8057 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8066 if (unlikely(!phba))
5601 return IRQ_NONE; 8067 return IRQ_NONE;
5602 8068
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8069 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8070 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8071 return IRQ_NONE;
5613 8072
5614 spin_lock(&phba->hbalock); 8073 spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8109 status2 >>= (4*LPFC_ELS_RING);
5651 8110
5652 if (status1 || (status2 & HA_RXMASK)) 8111 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8112 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8113 else
5655 sp_irq_rc = IRQ_NONE; 8114 sp_irq_rc = IRQ_NONE;
5656 8115
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8129 status2 = 0;
5671 8130
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8131 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8132 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8133 else
5675 fp_irq_rc = IRQ_NONE; 8134 fp_irq_rc = IRQ_NONE;
5676 8135
5677 /* Return device-level interrupt handling status */ 8136 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8137 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8138} /* lpfc_sli_intr_handler */
8139
8140/**
8141 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8142 * @phba: pointer to lpfc hba data structure.
8143 *
8144 * This routine is invoked by the worker thread to process all the pending
8145 * SLI4 FCP abort XRI events.
8146 **/
8147void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8148{
8149 struct lpfc_cq_event *cq_event;
8150
8151 /* First, declare the fcp xri abort event has been handled */
8152 spin_lock_irq(&phba->hbalock);
8153 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8154 spin_unlock_irq(&phba->hbalock);
8155 /* Now, handle all the fcp xri abort events */
8156 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8157 /* Get the first event from the head of the event queue */
8158 spin_lock_irq(&phba->hbalock);
8159 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8160 cq_event, struct lpfc_cq_event, list);
8161 spin_unlock_irq(&phba->hbalock);
8162 /* Notify aborted XRI for FCP work queue */
8163 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8164 /* Free the event processed back to the free pool */
8165 lpfc_sli4_cq_event_release(phba, cq_event);
8166 }
8167}
8168
8169/**
8170 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8171 * @phba: pointer to lpfc hba data structure.
8172 *
8173 * This routine is invoked by the worker thread to process all the pending
8174 * SLI4 els abort xri events.
8175 **/
8176void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8177{
8178 struct lpfc_cq_event *cq_event;
8179
8180 /* First, declare the els xri abort event has been handled */
8181 spin_lock_irq(&phba->hbalock);
8182 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8183 spin_unlock_irq(&phba->hbalock);
8184 /* Now, handle all the els xri abort events */
8185 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8186 /* Get the first event from the head of the event queue */
8187 spin_lock_irq(&phba->hbalock);
8188 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8189 cq_event, struct lpfc_cq_event, list);
8190 spin_unlock_irq(&phba->hbalock);
8191 /* Notify aborted XRI for ELS work queue */
8192 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8193 /* Free the event processed back to the free pool */
8194 lpfc_sli4_cq_event_release(phba, cq_event);
8195 }
8196}
8197
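/**
 * lpfc_sli4_iocb_param_transfer - Transfer a WCQE completion into a rsp iocbq
 * @pIocbIn: Pointer to the driver iocbq to be used as the pseudo response.
 * @pIocbOut: Pointer to the completed command iocbq.
 * @wcqe: Pointer to the work-queue completion queue entry.
 *
 * This routine copies the command iocbq contents, starting at the iocb
 * member, into the pseudo response iocbq, then maps the WCQE completion
 * status and parameters into the response iocb and its SLI4 info fields.
 **/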
8198static void
8199lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8200 struct lpfc_iocbq *pIocbOut,
8201 struct lpfc_wcqe_complete *wcqe)
8202{
8203 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8204
8205 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8206 sizeof(struct lpfc_iocbq) - offset);
8207 memset(&pIocbIn->sli4_info, 0,
8208 sizeof(struct lpfc_sli4_rspiocb_info));
8209 /* Map WCQE parameters into irspiocb parameters */
8210 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8211 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8212 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8213 pIocbIn->iocb.un.fcpi.fcpi_parm =
8214 pIocbOut->iocb.un.fcpi.fcpi_parm -
8215 wcqe->total_data_placed;
8216 else
8217 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8218 else
8219 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8220 /* Load in additional WCQE parameters */
8221 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8222 pIocbIn->sli4_info.bfield = 0;
8223 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8224 pIocbIn->sli4_info.bfield |= LPFC_XB;
8225 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8226 pIocbIn->sli4_info.bfield |= LPFC_PV;
8227 pIocbIn->sli4_info.priority =
8228 bf_get(lpfc_wcqe_c_priority, wcqe);
8229 }
8230}
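/*
 * Illustrative sketch only (not part of this patch): the offsetof()-based
 * partial-copy idiom used by lpfc_sli4_iocb_param_transfer above. The struct
 * and field names here are hypothetical; only the copy idiom matches the
 * driver code.
 */
#include <stddef.h>
#include <string.h>

struct example_iocbq {
	int list_token;		/* fields before 'iocb' are left untouched */
	char iocb[64];		/* everything from here onward is copied */
	int extra_state;
};

static void example_param_transfer(struct example_iocbq *in,
				   const struct example_iocbq *out)
{
	size_t offset = offsetof(struct example_iocbq, iocb);

	/* Copy 'iocb' and every member after it, preserving earlier fields */
	memcpy((char *)in + offset, (const char *)out + offset,
	       sizeof(struct example_iocbq) - offset);
}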
8231
8232/**
8233 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8234 * @phba: Pointer to HBA context object.
8235 * @mcqe: Pointer to mailbox completion queue entry.
8236 *
8237 * This routine processes a mailbox completion queue entry with an asynchronous
8238 * event.
8239 *
8240 * Return: true if work posted to worker thread, otherwise false.
8241 **/
8242static bool
8243lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8244{
8245 struct lpfc_cq_event *cq_event;
8246 unsigned long iflags;
8247
8248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8249 "0392 Async Event: word0:x%x, word1:x%x, "
8250 "word2:x%x, word3:x%x\n", mcqe->word0,
8251 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8252
8253 /* Allocate a new internal CQ_EVENT entry */
8254 cq_event = lpfc_sli4_cq_event_alloc(phba);
8255 if (!cq_event) {
8256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8257 "0394 Failed to allocate CQ_EVENT entry\n");
8258 return false;
8259 }
8260
8261 /* Move the CQE into an asynchronous event entry */
8262 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8263 spin_lock_irqsave(&phba->hbalock, iflags);
8264 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8265 /* Set the async event flag */
8266 phba->hba_flag |= ASYNC_EVENT;
8267 spin_unlock_irqrestore(&phba->hbalock, iflags);
8268
8269 return true;
8270}
8271
8272/**
8273 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8274 * @phba: Pointer to HBA context object.
8275 * @mcqe: Pointer to mailbox completion queue entry.
8276 *
8277 * This routine processes a mailbox completion queue entry with a mailbox
8278 * completion event.
8279 *
8280 * Return: true if work posted to worker thread, otherwise false.
8281 **/
8282static bool
8283lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8284{
8285 uint32_t mcqe_status;
8286 MAILBOX_t *mbox, *pmbox;
8287 struct lpfc_mqe *mqe;
8288 struct lpfc_vport *vport;
8289 struct lpfc_nodelist *ndlp;
8290 struct lpfc_dmabuf *mp;
8291 unsigned long iflags;
8292 LPFC_MBOXQ_t *pmb;
8293 bool workposted = false;
8294 int rc;
8295
8296 /* If not a mailbox completion MCQE, bail out after checking mailbox consume */
8297 if (!bf_get(lpfc_trailer_completed, mcqe))
8298 goto out_no_mqe_complete;
8299
8300 /* Get the reference to the active mbox command */
8301 spin_lock_irqsave(&phba->hbalock, iflags);
8302 pmb = phba->sli.mbox_active;
8303 if (unlikely(!pmb)) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8305 "1832 No pending MBOX command to handle\n");
8306 spin_unlock_irqrestore(&phba->hbalock, iflags);
8307 goto out_no_mqe_complete;
8308 }
8309 spin_unlock_irqrestore(&phba->hbalock, iflags);
8310 mqe = &pmb->u.mqe;
8311 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8312 mbox = phba->mbox;
8313 vport = pmb->vport;
8314
8315 /* Reset heartbeat timer */
8316 phba->last_completion_time = jiffies;
8317 del_timer(&phba->sli.mbox_tmo);
8318
8319 /* Move mbox data to caller's mailbox region, do endian swapping */
8320 if (pmb->mbox_cmpl && mbox)
8321 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8322 /* Set the mailbox status with SLI4 range 0x4000 */
8323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8324 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8325 bf_set(lpfc_mqe_status, mqe,
8326 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8327
8328 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8329 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8331 "MBOX dflt rpi: status:x%x rpi:x%x",
8332 mcqe_status,
8333 pmbox->un.varWords[0], 0);
8334 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8335 mp = (struct lpfc_dmabuf *)(pmb->context1);
8336 ndlp = (struct lpfc_nodelist *)pmb->context2;
8337 /* Reg_LOGIN of dflt RPI was successful. Now let's get
8338 * rid of the RPI using the same mbox buffer.
8339 */
8340 lpfc_unreg_login(phba, vport->vpi,
8341 pmbox->un.varWords[0], pmb);
8342 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8343 pmb->context1 = mp;
8344 pmb->context2 = ndlp;
8345 pmb->vport = vport;
8346 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8347 if (rc != MBX_BUSY)
8348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8349 LOG_SLI, "0385 rc should "
8350 "have been MBX_BUSY\n");
8351 if (rc != MBX_NOT_FINISHED)
8352 goto send_current_mbox;
8353 }
8354 }
8355 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8356 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8357 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8358
8359 /* There is mailbox completion work to do */
8360 spin_lock_irqsave(&phba->hbalock, iflags);
8361 __lpfc_mbox_cmpl_put(phba, pmb);
8362 phba->work_ha |= HA_MBATT;
8363 spin_unlock_irqrestore(&phba->hbalock, iflags);
8364 workposted = true;
8365
8366send_current_mbox:
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 /* Release the mailbox command posting token */
8369 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8370 /* Setting active mailbox pointer need to be in sync to flag clear */
8371 phba->sli.mbox_active = NULL;
8372 spin_unlock_irqrestore(&phba->hbalock, iflags);
8373 /* Wake up worker thread to post the next pending mailbox command */
8374 lpfc_worker_wake_up(phba);
8375out_no_mqe_complete:
8376 if (bf_get(lpfc_trailer_consumed, mcqe))
8377 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8378 return workposted;
8379}
8380
8381/**
8382 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8383 * @phba: Pointer to HBA context object.
8384 * @cqe: Pointer to mailbox completion queue entry.
8385 *
8386 * This routine processes a mailbox completion queue entry; it invokes the
8387 * proper mailbox completion handling or asynchronous event handling routine
8388 * according to the MCQE's async bit.
8389 *
8390 * Return: true if work posted to worker thread, otherwise false.
8391 **/
8392static bool
8393lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8394{
8395 struct lpfc_mcqe mcqe;
8396 bool workposted;
8397
8398 /* Copy the mailbox MCQE and convert endian order as needed */
8399 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8400
8401 /* Invoke the proper event handling routine */
8402 if (!bf_get(lpfc_trailer_async, &mcqe))
8403 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8404 else
8405 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8406 return workposted;
8407}
8408
8409/**
8410 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8411 * @phba: Pointer to HBA context object.
8412 * @wcqe: Pointer to work-queue completion queue entry.
8413 *
8414 * This routine handles an ELS work-queue completion event.
8415 *
8416 * Return: true if work posted to worker thread, otherwise false.
8417 **/
8418static bool
8419lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8420 struct lpfc_wcqe_complete *wcqe)
8421{
8422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8423 struct lpfc_iocbq *cmdiocbq;
8424 struct lpfc_iocbq *irspiocbq;
8425 unsigned long iflags;
8426 bool workposted = false;
8427
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pring->stats.iocb_event++;
8430 /* Look up the ELS command IOCB and create pseudo response IOCB */
8431 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434
8435 if (unlikely(!cmdiocbq)) {
8436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8437 "0386 ELS complete with no corresponding "
8438 "cmdiocb: iotag (%d)\n",
8439 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8440 return workposted;
8441 }
8442
8443 /* Fake the irspiocbq and copy necessary response information */
8444 irspiocbq = lpfc_sli_get_iocbq(phba);
8445 if (!irspiocbq) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8447 "0387 Failed to allocate an iocbq\n");
8448 return workposted;
8449 }
8450 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8451
8452 /* Add the irspiocb to the response IOCB work list */
8453 spin_lock_irqsave(&phba->hbalock, iflags);
8454 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8455 /* Indicate ELS ring attention */
8456 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8457 spin_unlock_irqrestore(&phba->hbalock, iflags);
8458 workposted = true;
8459
8460 return workposted;
8461}
8462
8463/**
8464 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8465 * @phba: Pointer to HBA context object.
8466 * @wcqe: Pointer to work-queue completion queue entry.
8467 *
8468 * This routine handles a slow-path WQ entry consumed event by invoking the
8469 * proper WQ release routine to the slow-path WQ.
8470 **/
8471static void
8472lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8473 struct lpfc_wcqe_release *wcqe)
8474{
8475 /* Check for the slow-path ELS work queue */
8476 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8477 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8478 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8479 else
8480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8481 "2579 Slow-path wqe consume event carries "
8482 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8483 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8484 phba->sli4_hba.els_wq->queue_id);
8485}
8486
8487/**
8488 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
8489 * @phba: Pointer to HBA context object.
8490 * @cq: Pointer to a WQ completion queue.
8491 * @wcqe: Pointer to work-queue completion queue entry.
8492 *
8493 * This routine handles an XRI abort event.
8494 *
8495 * Return: true if work posted to worker thread, otherwise false.
8496 **/
8497static bool
8498lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8499 struct lpfc_queue *cq,
8500 struct sli4_wcqe_xri_aborted *wcqe)
8501{
8502 bool workposted = false;
8503 struct lpfc_cq_event *cq_event;
8504 unsigned long iflags;
8505
8506 /* Allocate a new internal CQ_EVENT entry */
8507 cq_event = lpfc_sli4_cq_event_alloc(phba);
8508 if (!cq_event) {
8509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8510 "0602 Failed to allocate CQ_EVENT entry\n");
8511 return false;
8512 }
8513
8514 /* Move the CQE into the proper xri abort event list */
8515 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8516 switch (cq->subtype) {
8517 case LPFC_FCP:
8518 spin_lock_irqsave(&phba->hbalock, iflags);
8519 list_add_tail(&cq_event->list,
8520 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8521 /* Set the fcp xri abort event flag */
8522 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8523 spin_unlock_irqrestore(&phba->hbalock, iflags);
8524 workposted = true;
8525 break;
8526 case LPFC_ELS:
8527 spin_lock_irqsave(&phba->hbalock, iflags);
8528 list_add_tail(&cq_event->list,
8529 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8530 /* Set the els xri abort event flag */
8531 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8532 spin_unlock_irqrestore(&phba->hbalock, iflags);
8533 workposted = true;
8534 break;
8535 default:
8536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8537 "0603 Invalid work queue CQE subtype (x%x)\n",
8538 cq->subtype);
8539 workposted = false;
8540 break;
8541 }
8542 return workposted;
8543}
8544
8545/**
8546 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8547 * @phba: Pointer to HBA context object.
8548 * @cq: Pointer to the completion queue.
8549 * @wcqe: Pointer to a completion queue entry.
8550 *
8551 * This routine processes a slow-path work-queue completion queue entry.
8552 *
8553 * Return: true if work posted to worker thread, otherwise false.
8554 **/
8555static bool
8556lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8557 struct lpfc_cqe *cqe)
8558{
8559 struct lpfc_wcqe_complete wcqe;
8560 bool workposted = false;
8561
8562 /* Copy the work queue CQE and convert endian order if needed */
8563 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8564
8565 /* Check and process for different type of WCQE and dispatch */
8566 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8567 case CQE_CODE_COMPL_WQE:
8568 /* Process the WQ complete event */
8569 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8570 (struct lpfc_wcqe_complete *)&wcqe);
8571 break;
8572 case CQE_CODE_RELEASE_WQE:
8573 /* Process the WQ release event */
8574 lpfc_sli4_sp_handle_rel_wcqe(phba,
8575 (struct lpfc_wcqe_release *)&wcqe);
8576 break;
8577 case CQE_CODE_XRI_ABORTED:
8578 /* Process the WQ XRI abort event */
8579 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8580 (struct sli4_wcqe_xri_aborted *)&wcqe);
8581 break;
8582 default:
8583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8584 "0388 Not a valid WCQE code: x%x\n",
8585 bf_get(lpfc_wcqe_c_code, &wcqe));
8586 break;
8587 }
8588 return workposted;
8589}
8590
8591/**
8592 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8593 * @phba: Pointer to HBA context object.
8594 * @rcqe: Pointer to receive-queue completion queue entry.
8595 *
8596 * This routine processes a receive-queue completion queue entry.
8597 *
8598 * Return: true if work posted to worker thread, otherwise false.
8599 **/
8600static bool
8601lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8602{
8603 struct lpfc_rcqe rcqe;
8604 bool workposted = false;
8605 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8606 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8607 struct hbq_dmabuf *dma_buf;
8608 uint32_t status;
8609 unsigned long iflags;
8610
8611 /* Copy the receive queue CQE and convert endian order if needed */
8612 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8613 lpfc_sli4_rq_release(hrq, drq);
8614 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8615 goto out;
8616 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8617 goto out;
8618
8619 status = bf_get(lpfc_rcqe_status, &rcqe);
8620 switch (status) {
8621 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8623 "2537 Receive Frame Truncated!!\n");
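		/* Fall through: still queue the truncated frame for the worker */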
8624 case FC_STATUS_RQ_SUCCESS:
8625 spin_lock_irqsave(&phba->hbalock, iflags);
8626 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8627 if (!dma_buf) {
8628 spin_unlock_irqrestore(&phba->hbalock, iflags);
8629 goto out;
8630 }
8631 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
8632 /* save off the frame for the worker thread to process */
8633 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8634 /* Frame received */
8635 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8636 spin_unlock_irqrestore(&phba->hbalock, iflags);
8637 workposted = true;
8638 break;
8639 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8640 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8641 /* Post more buffers if possible */
8642 spin_lock_irqsave(&phba->hbalock, iflags);
8643 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8644 spin_unlock_irqrestore(&phba->hbalock, iflags);
8645 workposted = true;
8646 break;
8647 }
8648out:
8649 return workposted;
8650
8651}
8652
8653/**
8654 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8655 * @phba: Pointer to HBA context object.
8656 * @eqe: Pointer to slow-path event queue entry.
8657 *
8658 * This routine processes an event queue entry from the slow-path event queue.
8659 * It checks the MajorCode and MinorCode to determine whether this is a
8660 * completion event on a completion queue; if not, an error is logged and the
8661 * routine returns. Otherwise, it finds the corresponding completion queue,
8662 * processes all the entries on that completion queue, rearms the completion
8663 * queue, and then returns.
8664 *
8665 **/
8666static void
8667lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8668{
8669 struct lpfc_queue *cq = NULL, *childq, *speq;
8670 struct lpfc_cqe *cqe;
8671 bool workposted = false;
8672 int ecount = 0;
8673 uint16_t cqid;
8674
8675 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8676 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8678 "0359 Not a valid slow-path completion "
8679 "event: majorcode=x%x, minorcode=x%x\n",
8680 bf_get(lpfc_eqe_major_code, eqe),
8681 bf_get(lpfc_eqe_minor_code, eqe));
8682 return;
8683 }
8684
8685 /* Get the reference to the corresponding CQ */
8686 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8687
8688 /* Search for completion queue pointer matching this cqid */
8689 speq = phba->sli4_hba.sp_eq;
8690 list_for_each_entry(childq, &speq->child_list, list) {
8691 if (childq->queue_id == cqid) {
8692 cq = childq;
8693 break;
8694 }
8695 }
8696 if (unlikely(!cq)) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8698 "0365 Slow-path CQ identifier (%d) does "
8699 "not exist\n", cqid);
8700 return;
8701 }
8702
8703 /* Process all the entries to the CQ */
8704 switch (cq->type) {
8705 case LPFC_MCQ:
8706 while ((cqe = lpfc_sli4_cq_get(cq))) {
8707 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8708 if (!(++ecount % LPFC_GET_QE_REL_INT))
8709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8710 }
8711 break;
8712 case LPFC_WCQ:
8713 while ((cqe = lpfc_sli4_cq_get(cq))) {
8714 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8715 if (!(++ecount % LPFC_GET_QE_REL_INT))
8716 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8717 }
8718 break;
8719 case LPFC_RCQ:
8720 while ((cqe = lpfc_sli4_cq_get(cq))) {
8721 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8722 if (!(++ecount % LPFC_GET_QE_REL_INT))
8723 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8724 }
8725 break;
8726 default:
8727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8728 "0370 Invalid completion queue type (%d)\n",
8729 cq->type);
8730 return;
8731 }
8732
8733 /* Catch the no cq entry condition, log an error */
8734 if (unlikely(ecount == 0))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "0371 No entry from the CQ: identifier "
8737 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8738
8739 /* In any case, flush and re-arm the CQ */
8740 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8741
8742 /* wake up worker thread if there is work to be done */
8743 if (workposted)
8744 lpfc_worker_wake_up(phba);
8745}
8746
8747/**
8748 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8749 * @phba: Pointer to HBA context object.
8750 * @wcqe: Pointer to work-queue completion queue entry.
8751 * This routine processes a fast-path work queue completion entry from the
8752 * fast-path event queue for an FCP command response completion.
8753 **/
8754static void
8755lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8756 struct lpfc_wcqe_complete *wcqe)
8757{
8758 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8759 struct lpfc_iocbq *cmdiocbq;
8760 struct lpfc_iocbq irspiocbq;
8761 unsigned long iflags;
8762
8763 spin_lock_irqsave(&phba->hbalock, iflags);
8764 pring->stats.iocb_event++;
8765 spin_unlock_irqrestore(&phba->hbalock, iflags);
8766
8767 /* Check for response status */
8768 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8769 /* If resource errors reported from HBA, reduce queue
8770 * depth of the SCSI device.
8771 */
8772 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8773 IOSTAT_LOCAL_REJECT) &&
8774 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8775 phba->lpfc_rampdown_queue_depth(phba);
8776 }
8777 /* Log the error status */
8778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8779 "0373 FCP complete error: status=x%x, "
8780 "hw_status=x%x, total_data_specified=%d, "
8781 "parameter=x%x, word3=x%x\n",
8782 bf_get(lpfc_wcqe_c_status, wcqe),
8783 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8784 wcqe->total_data_placed, wcqe->parameter,
8785 wcqe->word3);
8786 }
8787
8788 /* Look up the FCP command IOCB and create pseudo response IOCB */
8789 spin_lock_irqsave(&phba->hbalock, iflags);
8790 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8791 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8792 spin_unlock_irqrestore(&phba->hbalock, iflags);
8793 if (unlikely(!cmdiocbq)) {
8794 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8795 "0374 FCP complete with no corresponding "
8796 "cmdiocb: iotag (%d)\n",
8797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8798 return;
8799 }
8800 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8802 "0375 FCP cmdiocb not callback function "
8803 "iotag: (%d)\n",
8804 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8805 return;
8806 }
8807
8808 /* Fake the irspiocb and copy necessary response information */
8809 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8810
8811 /* Pass the cmd_iocb and the rsp state to the upper layer */
8812 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8813}
8814
8815/**
8816 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8817 * @phba: Pointer to HBA context object.
8818 * @cq: Pointer to completion queue.
8819 * @wcqe: Pointer to work-queue completion queue entry.
8820 *
8821 * This routine handles a fast-path WQ entry consumed event by invoking the
8822 * proper WQ release routine to the fast-path FCP WQ.
8823 **/
8824static void
8825lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8826 struct lpfc_wcqe_release *wcqe)
8827{
8828 struct lpfc_queue *childwq;
8829 bool wqid_matched = false;
8830 uint16_t fcp_wqid;
8831
8832 /* Check for fast-path FCP work queue release */
8833 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8834 list_for_each_entry(childwq, &cq->child_list, list) {
8835 if (childwq->queue_id == fcp_wqid) {
8836 lpfc_sli4_wq_release(childwq,
8837 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8838 wqid_matched = true;
8839 break;
8840 }
8841 }
8842 /* Report warning log message if no match found */
8843 if (wqid_matched != true)
8844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8845 "2580 Fast-path wqe consume event carries "
8846 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8847}
8848
8849/**
8850 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8851 * @phba: Pointer to HBA context object.
8852 * @cq: Pointer to the completion queue.
8853 * @cqe: Pointer to fast-path completion queue entry.
8854 * This routine processes a fast-path work queue completion entry from the
8855 * fast-path event queue for an FCP command response completion.
8856 **/
8857static int
8858lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8859 struct lpfc_cqe *cqe)
8860{
8861 struct lpfc_wcqe_release wcqe;
8862 bool workposted = false;
8863
8864 /* Copy the work queue CQE and convert endian order if needed */
8865 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8866
8867 /* Check and process for different type of WCQE and dispatch */
8868 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8869 case CQE_CODE_COMPL_WQE:
8870 /* Process the WQ complete event */
8871 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8872 (struct lpfc_wcqe_complete *)&wcqe);
8873 break;
8874 case CQE_CODE_RELEASE_WQE:
8875 /* Process the WQ release event */
8876 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8877 (struct lpfc_wcqe_release *)&wcqe);
8878 break;
8879 case CQE_CODE_XRI_ABORTED:
8880 /* Process the WQ XRI abort event */
8881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8882 (struct sli4_wcqe_xri_aborted *)&wcqe);
8883 break;
8884 default:
8885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8886 "0144 Not a valid WCQE code: x%x\n",
8887 bf_get(lpfc_wcqe_c_code, &wcqe));
8888 break;
8889 }
8890 return workposted;
8891}
8892
8893/**
8894 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8895 * @phba: Pointer to HBA context object.
8896 * @eqe: Pointer to fast-path event queue entry.
8897 *
8898 * This routine processes an event queue entry from the fast-path event queue.
8899 * It checks the MajorCode and MinorCode to determine whether this is a
8900 * completion event on a completion queue; if not, an error is logged and the
8901 * routine returns. Otherwise, it finds the corresponding completion queue,
8902 * processes all the entries on that completion queue, rearms the completion
8903 * queue, and then returns.
8904 **/
8905static void
8906lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
8907 uint32_t fcp_cqidx)
8908{
8909 struct lpfc_queue *cq;
8910 struct lpfc_cqe *cqe;
8911 bool workposted = false;
8912 uint16_t cqid;
8913 int ecount = 0;
8914
8915 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
8916 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8918 "0366 Not a valid fast-path completion "
8919 "event: majorcode=x%x, minorcode=x%x\n",
8920 bf_get(lpfc_eqe_major_code, eqe),
8921 bf_get(lpfc_eqe_minor_code, eqe));
8922 return;
8923 }
8924
8925 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
8926 if (unlikely(!cq)) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8928 "0367 Fast-path completion queue does not "
8929 "exist\n");
8930 return;
8931 }
8932
8933 /* Get the reference to the corresponding CQ */
8934 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8935 if (unlikely(cqid != cq->queue_id)) {
8936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8937 "0368 Miss-matched fast-path completion "
8938 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
8939 cqid, cq->queue_id);
8940 return;
8941 }
8942
8943 /* Process all the entries to the CQ */
8944 while ((cqe = lpfc_sli4_cq_get(cq))) {
8945 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
8946 if (!(++ecount % LPFC_GET_QE_REL_INT))
8947 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8948 }
8949
8950 /* Catch the no cq entry condition */
8951 if (unlikely(ecount == 0))
8952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8953 "0369 No entry from fast-path completion "
8954 "queue fcpcqid=%d\n", cq->queue_id);
8955
8956 /* In any case, flush and re-arm the CQ */
8957 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8958
8959 /* wake up worker thread if there is work to be done */
8960 if (workposted)
8961 lpfc_worker_wake_up(phba);
8962}
8963
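/**
 * lpfc_sli4_eq_flush - Drain and re-arm an event queue
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 *
 * This routine walks the event queue and drops every pending entry on the
 * floor, then clears and re-arms the event queue.
 **/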
8964static void
8965lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
8966{
8967 struct lpfc_eqe *eqe;
8968
8969 /* walk all the EQ entries and drop on the floor */
8970 while ((eqe = lpfc_sli4_eq_get(eq)))
8971 ;
8972
8973 /* Clear and re-arm the EQ */
8974 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
8975}
8976
8977/**
8978 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
8979 * @irq: Interrupt number.
8980 * @dev_id: The device context pointer.
8981 *
8982 * This function is directly called from the PCI layer as an interrupt
8983 * service routine when device with SLI-4 interface spec is enabled with
8984 * MSI-X multi-message interrupt mode and there are slow-path events in
8985 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
8986 * interrupt mode, this function is called as part of the device-level
8987 * interrupt handler. When the PCI slot is in error recovery or the HBA is
8988 * undergoing initialization, the interrupt handler will not process the
8989 * interrupt. The link attention and ELS ring attention events are handled
8990 * by the worker thread. The interrupt handler signals the worker thread
8991 * and returns for these events. This function is called without any lock
8992 * held. It gets the hbalock to access and update SLI data structures.
8993 *
8994 * This function returns IRQ_HANDLED when interrupt is handled else it
8995 * returns IRQ_NONE.
8996 **/
8997irqreturn_t
8998lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
8999{
9000 struct lpfc_hba *phba;
9001 struct lpfc_queue *speq;
9002 struct lpfc_eqe *eqe;
9003 unsigned long iflag;
9004 int ecount = 0;
9005
9006 /*
9007 * Get the driver's phba structure from the dev_id
9008 */
9009 phba = (struct lpfc_hba *)dev_id;
9010
9011 if (unlikely(!phba))
9012 return IRQ_NONE;
9013
9014 /* Get to the EQ struct associated with this vector */
9015 speq = phba->sli4_hba.sp_eq;
9016
9017 /* Check device state for handling interrupt */
9018 if (unlikely(lpfc_intr_state_check(phba))) {
9019 /* Check again for link_state with lock held */
9020 spin_lock_irqsave(&phba->hbalock, iflag);
9021 if (phba->link_state < LPFC_LINK_DOWN)
9022 /* Flush, clear interrupt, and rearm the EQ */
9023 lpfc_sli4_eq_flush(phba, speq);
9024 spin_unlock_irqrestore(&phba->hbalock, iflag);
9025 return IRQ_NONE;
9026 }
9027
9028 /*
9029 * Process all the events on the slow-path EQ
9030 */
9031 while ((eqe = lpfc_sli4_eq_get(speq))) {
9032 lpfc_sli4_sp_handle_eqe(phba, eqe);
9033 if (!(++ecount % LPFC_GET_QE_REL_INT))
9034 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9035 }
9036
9037 /* Always clear and re-arm the slow-path EQ */
9038 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9039
9040 /* Catch the no EQ entry condition */
9041 if (unlikely(ecount == 0)) {
9042 if (phba->intr_type == MSIX)
9043 /* MSI-X vector is dedicated, so an empty EQ is unexpected */
9044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9045 "0357 MSI-X interrupt with no EQE\n");
9046 else
9047 /* Non MSI-X: interrupt may be shared, treat as not ours */
9048 return IRQ_NONE;
9049 }
9050
9051 return IRQ_HANDLED;
9052} /* lpfc_sli4_sp_intr_handler */
9053
9054/**
9055 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9056 * @irq: Interrupt number.
9057 * @dev_id: The device context pointer.
9058 *
9059 * This function is directly called from the PCI layer as an interrupt
9060 * service routine when device with SLI-4 interface spec is enabled with
9061 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9062 * ring event in the HBA. However, when the device is enabled with either
9063 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9064 * device-level interrupt handler. When the PCI slot is in error recovery
9065 * or the HBA is undergoing initialization, the interrupt handler will not
9066 * process the interrupt. The SCSI FCP fast-path ring event are handled in
9067 * the intrrupt context. This function is called without any lock held.
9068 * It gets the hbalock to access and update SLI data structures. Note that,
9069 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
9070 * equal to that of FCP CQ index.
9071 *
9072 * This function returns IRQ_HANDLED when interrupt is handled else it
9073 * returns IRQ_NONE.
9074 **/
9075irqreturn_t
9076lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9077{
9078 struct lpfc_hba *phba;
9079 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9080 struct lpfc_queue *fpeq;
9081 struct lpfc_eqe *eqe;
9082 unsigned long iflag;
9083 int ecount = 0;
9084 uint32_t fcp_eqidx;
9085
9086 /* Get the driver's phba structure from the dev_id */
9087 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9088 phba = fcp_eq_hdl->phba;
9089 fcp_eqidx = fcp_eq_hdl->idx;
9090
9091 if (unlikely(!phba))
9092 return IRQ_NONE;
9093
9094 /* Get to the EQ struct associated with this vector */
9095 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9096
9097 /* Check device state for handling interrupt */
9098 if (unlikely(lpfc_intr_state_check(phba))) {
9099 /* Check again for link_state with lock held */
9100 spin_lock_irqsave(&phba->hbalock, iflag);
9101 if (phba->link_state < LPFC_LINK_DOWN)
9102 /* Flush, clear interrupt, and rearm the EQ */
9103 lpfc_sli4_eq_flush(phba, fpeq);
9104 spin_unlock_irqrestore(&phba->hbalock, iflag);
9105 return IRQ_NONE;
9106 }
9107
9108 /*
9109 * Process all the events on the FCP fast-path EQ
9110 */
9111 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9112 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9113 if (!(++ecount % LPFC_GET_QE_REL_INT))
9114 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9115 }
9116
9117 /* Always clear and re-arm the fast-path EQ */
9118 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9119
9120 if (unlikely(ecount == 0)) {
9121 if (phba->intr_type == MSIX)
9122 /* MSI-X vector is dedicated, so an empty EQ is unexpected */
9123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9124 "0358 MSI-X interrupt with no EQE\n");
9125 else
9126 /* Non MSI-X: interrupt may be shared, treat as not ours */
9127 return IRQ_NONE;
9128 }
9129
9130 return IRQ_HANDLED;
9131} /* lpfc_sli4_fp_intr_handler */
9132
9133/**
9134 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9135 * @irq: Interrupt number.
9136 * @dev_id: The device context pointer.
9137 *
9138 * This function is the device-level interrupt handler to device with SLI-4
9139 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9140 * interrupt mode is enabled and there is an event in the HBA which requires
9141 * driver attention. This function invokes the slow-path interrupt attention
9142 * handling function and fast-path interrupt attention handling function in
9143 * turn to process the relevant HBA attention events. This function is called
9144 * without any lock held. It gets the hbalock to access and update SLI data
9145 * structures.
9146 *
9147 * This function returns IRQ_HANDLED when interrupt is handled, else it
9148 * returns IRQ_NONE.
9149 **/
9150irqreturn_t
9151lpfc_sli4_intr_handler(int irq, void *dev_id)
9152{
9153 struct lpfc_hba *phba;
9154 irqreturn_t sp_irq_rc, fp_irq_rc;
9155 bool fp_handled = false;
9156 uint32_t fcp_eqidx;
9157
9158 /* Get the driver's phba structure from the dev_id */
9159 phba = (struct lpfc_hba *)dev_id;
9160
9161 if (unlikely(!phba))
9162 return IRQ_NONE;
9163
9164 /*
9165 * Invokes slow-path host attention interrupt handling as appropriate.
9166 */
9167 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9168
9169 /*
9170 * Invoke fast-path host attention interrupt handling as appropriate.
9171 */
9172 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9173 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9174 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9175 if (fp_irq_rc == IRQ_HANDLED)
9176 fp_handled |= true;
9177 }
9178
9179 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9180} /* lpfc_sli4_intr_handler */
9181
9182/**
9183 * lpfc_sli4_queue_free - free a queue structure and associated memory
9184 * @queue: The queue structure to free.
9185 *
9186 * This function frees a queue structure and the DMAable memory used for
9187 * the host resident queue. This function must be called after destroying the
9188 * queue on the HBA.
9189 **/
9190void
9191lpfc_sli4_queue_free(struct lpfc_queue *queue)
9192{
9193 struct lpfc_dmabuf *dmabuf;
9194
9195 if (!queue)
9196 return;
9197
9198 while (!list_empty(&queue->page_list)) {
9199 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9200 list);
9201 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9202 dmabuf->virt, dmabuf->phys);
9203 kfree(dmabuf);
9204 }
9205 kfree(queue);
9206 return;
9207}
9208
9209/**
9210 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9211 * @phba: The HBA that this queue is being created on.
9212 * @entry_size: The size of each queue entry for this queue.
9213 * @entry_count: The number of entries that this queue will handle.
9214 *
9215 * This function allocates a queue structure and the DMAable memory used for
9216 * the host resident queue. This function must be called before creating the
9217 * queue on the HBA.
9218 **/
9219struct lpfc_queue *
9220lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9221 uint32_t entry_count)
9222{
9223 struct lpfc_queue *queue;
9224 struct lpfc_dmabuf *dmabuf;
9225 int x, total_qe_count;
9226 void *dma_pointer;
9227
9228
9229 queue = kzalloc(sizeof(struct lpfc_queue) +
9230 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9231 if (!queue)
9232 return NULL;
9233 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9234 INIT_LIST_HEAD(&queue->list);
9235 INIT_LIST_HEAD(&queue->page_list);
9236 INIT_LIST_HEAD(&queue->child_list);
9237 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9238 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9239 if (!dmabuf)
9240 goto out_fail;
9241 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9242 PAGE_SIZE, &dmabuf->phys,
9243 GFP_KERNEL);
9244 if (!dmabuf->virt) {
9245 kfree(dmabuf);
9246 goto out_fail;
9247 }
9248 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */
9251 dma_pointer = dmabuf->virt;
9252 for (; total_qe_count < entry_count &&
9253 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9254 total_qe_count++, dma_pointer += entry_size) {
9255 queue->qe[total_qe_count].address = dma_pointer;
9256 }
9257 }
9258 queue->entry_size = entry_size;
9259 queue->entry_count = entry_count;
9260 queue->phba = phba;
9261
9262 return queue;
9263out_fail:
9264 lpfc_sli4_queue_free(queue);
9265 return NULL;
9266}
9267
9268/**
9269 * lpfc_eq_create - Create an Event Queue on the HBA
9270 * @phba: HBA structure that indicates port to create a queue on.
9271 * @eq: The queue structure to use to create the event queue.
9272 * @imax: The maximum interrupt per second limit.
9273 *
9274 * This function creates an event queue, as detailed in @eq, on a port,
9275 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9276 *
9277 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9278 * is used to get the entry count and entry size that are necessary to
9279 * determine the number of pages to allocate and use for this queue. This
9280 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9281 * event queue. The mailbox command is issued in polled mode, so this function
9282 * waits for it to finish before continuing.
9283 *
9284 * On success this function will return a zero. If unable to allocate enough
9285 * memory this function will return ENOMEM. If the queue create mailbox command
9286 * fails this function will return ENXIO.
9287 **/
9288uint32_t
9289lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9290{
9291 struct lpfc_mbx_eq_create *eq_create;
9292 LPFC_MBOXQ_t *mbox;
9293 int rc, length, status = 0;
9294 struct lpfc_dmabuf *dmabuf;
9295 uint32_t shdr_status, shdr_add_status;
9296 union lpfc_sli4_cfg_shdr *shdr;
9297 uint16_t dmult;
9298
9299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9300 if (!mbox)
9301 return -ENOMEM;
9302 length = (sizeof(struct lpfc_mbx_eq_create) -
9303 sizeof(struct lpfc_sli4_cfg_mhdr));
9304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9305 LPFC_MBOX_OPCODE_EQ_CREATE,
9306 length, LPFC_SLI4_MBX_EMBED);
9307 eq_create = &mbox->u.mqe.un.eq_create;
9308 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9309 eq->page_count);
9310 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9311 LPFC_EQE_SIZE);
9312 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9313 /* Calculate delay multiplier from maximum interrupts per second */
9314 dmult = LPFC_DMULT_CONST/imax - 1;
9315 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9316 dmult);
9317 switch (eq->entry_count) {
9318 default:
9319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9320 "0360 Unsupported EQ count. (%d)\n",
9321 eq->entry_count);
9322 if (eq->entry_count < 256)
9323 return -EINVAL;
9324 /* otherwise default to smallest count (drop through) */
9325 case 256:
9326 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9327 LPFC_EQ_CNT_256);
9328 break;
9329 case 512:
9330 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9331 LPFC_EQ_CNT_512);
9332 break;
9333 case 1024:
9334 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9335 LPFC_EQ_CNT_1024);
9336 break;
9337 case 2048:
9338 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9339 LPFC_EQ_CNT_2048);
9340 break;
9341 case 4096:
9342 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9343 LPFC_EQ_CNT_4096);
9344 break;
9345 }
9346 list_for_each_entry(dmabuf, &eq->page_list, list) {
9347 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9348 putPaddrLow(dmabuf->phys);
9349 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9350 putPaddrHigh(dmabuf->phys);
9351 }
9352 mbox->vport = phba->pport;
9353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9354 mbox->context1 = NULL;
9355 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9356 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9359 if (shdr_status || shdr_add_status || rc) {
9360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9361 "2500 EQ_CREATE mailbox failed with "
9362 "status x%x add_status x%x, mbx status x%x\n",
9363 shdr_status, shdr_add_status, rc);
9364 status = -ENXIO;
9365 }
9366 eq->type = LPFC_EQ;
9367 eq->subtype = LPFC_NONE;
9368 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9369 if (eq->queue_id == 0xFFFF)
9370 status = -ENXIO;
9371 eq->host_index = 0;
9372 eq->hba_index = 0;
9373
9374 if (rc != MBX_TIMEOUT)
9375 mempool_free(mbox, phba->mbox_mem_pool);
9376 return status;
9377}
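/*
 * Illustrative sketch only (not part of this patch): how lpfc_sli4_queue_alloc
 * and lpfc_eq_create above are intended to compose. The function name, the
 * 1024 entry count and the 1000 interrupts-per-second limit are example
 * values; the real driver derives these from its configuration parameters.
 */
static int lpfc_example_setup_eq(struct lpfc_hba *phba, struct lpfc_queue **eqp)
{
	struct lpfc_queue *eq;
	uint32_t rc;

	/* Allocate the host-resident memory for a 1024-entry event queue */
	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE, 1024);
	if (!eq)
		return -ENOMEM;

	/* Create the EQ on the port, limited to ~1000 interrupts per second */
	rc = lpfc_eq_create(phba, eq, 1000);
	if (rc) {
		/* Creation failed; release the host-side queue memory */
		lpfc_sli4_queue_free(eq);
		return -ENXIO;
	}

	*eqp = eq;
	return 0;
}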
9378
9379/**
9380 * lpfc_cq_create - Create a Completion Queue on the HBA
9381 * @phba: HBA structure that indicates port to create a queue on.
9382 * @cq: The queue structure to use to create the completion queue.
9383 * @eq: The event queue to bind this completion queue to.
9384 *
9385 * This function creates a completion queue, as detailed in @cq, on a port,
9386 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9387 *
9388 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9389 * is used to get the entry count and entry size that are necessary to
9390 * determine the number of pages to allocate and use for this queue. The @eq
9391 * is used to indicate which event queue to bind this completion queue to. This
9392 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9393 * completion queue. The mailbox command is issued in polled mode, so this
9394 * function waits for it to finish before continuing.
9395 *
9396 * On success this function will return a zero. If unable to allocate enough
9397 * memory this function will return ENOMEM. If the queue create mailbox command
9398 * fails this function will return ENXIO.
9399 **/
9400uint32_t
9401lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9402 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9403{
9404 struct lpfc_mbx_cq_create *cq_create;
9405 struct lpfc_dmabuf *dmabuf;
9406 LPFC_MBOXQ_t *mbox;
9407 int rc, length, status = 0;
9408 uint32_t shdr_status, shdr_add_status;
9409 union lpfc_sli4_cfg_shdr *shdr;
9410
9411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9412 if (!mbox)
9413 return -ENOMEM;
9414 length = (sizeof(struct lpfc_mbx_cq_create) -
9415 sizeof(struct lpfc_sli4_cfg_mhdr));
9416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9417 LPFC_MBOX_OPCODE_CQ_CREATE,
9418 length, LPFC_SLI4_MBX_EMBED);
9419 cq_create = &mbox->u.mqe.un.cq_create;
9420 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9421 cq->page_count);
9422 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9423 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9424 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9425 switch (cq->entry_count) {
9426 default:
9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9428 "0361 Unsupported CQ count. (%d)\n",
9429 cq->entry_count);
9430 if (cq->entry_count < 256)
9431 return -EINVAL;
9432 /* otherwise default to smallest count (drop through) */
9433 case 256:
9434 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9435 LPFC_CQ_CNT_256);
9436 break;
9437 case 512:
9438 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9439 LPFC_CQ_CNT_512);
9440 break;
9441 case 1024:
9442 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9443 LPFC_CQ_CNT_1024);
9444 break;
9445 }
9446 list_for_each_entry(dmabuf, &cq->page_list, list) {
9447 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9448 putPaddrLow(dmabuf->phys);
9449 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9450 putPaddrHigh(dmabuf->phys);
9451 }
9452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9453
9454 /* The IOCTL status is embedded in the mailbox subheader. */
9455 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9458 if (shdr_status || shdr_add_status || rc) {
9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460 "2501 CQ_CREATE mailbox failed with "
9461 "status x%x add_status x%x, mbx status x%x\n",
9462 shdr_status, shdr_add_status, rc);
9463 status = -ENXIO;
9464 goto out;
9465 }
9466 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9467 if (cq->queue_id == 0xFFFF) {
9468 status = -ENXIO;
9469 goto out;
9470 }
9471 /* link the cq onto the parent eq child list */
9472 list_add_tail(&cq->list, &eq->child_list);
9473 /* Set up completion queue's type and subtype */
9474 cq->type = type;
9475 cq->subtype = subtype;
9476 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9477 cq->host_index = 0;
9478 cq->hba_index = 0;
9479out:
9480
9481 if (rc != MBX_TIMEOUT)
9482 mempool_free(mbox, phba->mbox_mem_pool);
9483 return status;
9484}
9485
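/*
 * Editor's illustrative sketch (not part of the merged driver source): a
 * minimal example of binding an already allocated completion queue to an
 * event queue with lpfc_cq_create().  The LPFC_WCQ type and LPFC_FCP
 * subtype values, and the example_ function name itself, are illustrative
 * assumptions rather than code from this tree.
 */
static int example_setup_fcp_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
				struct lpfc_queue *eq)
{
	/* On success cq->queue_id is filled in and cq is linked onto
	 * eq->child_list by lpfc_cq_create(). */
	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP))
		return -ENXIO;
	return 0;
}
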
9486/**
9487 * lpfc_mq_create - Create a mailbox Queue on the HBA
9488 * @phba: HBA structure that indicates port to create a queue on.
9489 * @mq: The queue structure to use to create the mailbox queue.
9490 *
9491 * This function creates a mailbox queue, as detailed in @mq, on a port,
9492 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9493 *
9494 * The @phba struct is used to send mailbox command to HBA. The @mq struct
9495 * is used to get the entry count and entry size that are necessary to
9496 * determine the number of pages to allocate and use for this queue. This
9497 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9498 * mailbox queue. This function is synchronous and will wait for the mailbox
9499 * command to complete before returning.
9500 *
9501 * On success this function will return a zero. If unable to allocate enough
9502 * memory this function will return ENOMEM. If the queue create mailbox command
9503 * fails this function will return ENXIO.
9504 **/
9505uint32_t
9506lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9507 struct lpfc_queue *cq, uint32_t subtype)
9508{
9509 struct lpfc_mbx_mq_create *mq_create;
9510 struct lpfc_dmabuf *dmabuf;
9511 LPFC_MBOXQ_t *mbox;
9512 int rc, length, status = 0;
9513 uint32_t shdr_status, shdr_add_status;
9514 union lpfc_sli4_cfg_shdr *shdr;
9515
9516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9517 if (!mbox)
9518 return -ENOMEM;
9519 length = (sizeof(struct lpfc_mbx_mq_create) -
9520 sizeof(struct lpfc_sli4_cfg_mhdr));
9521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9522 LPFC_MBOX_OPCODE_MQ_CREATE,
9523 length, LPFC_SLI4_MBX_EMBED);
9524 mq_create = &mbox->u.mqe.un.mq_create;
9525 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9526 mq->page_count);
9527 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9528 cq->queue_id);
9529 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9530 switch (mq->entry_count) {
9531 default:
9532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9533 "0362 Unsupported MQ count. (%d)\n",
9534 mq->entry_count);
9535 if (mq->entry_count < 16)
9536 return -EINVAL;
9537 /* otherwise default to smallest count (drop through) */
9538 case 16:
9539 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9540 LPFC_MQ_CNT_16);
9541 break;
9542 case 32:
9543 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9544 LPFC_MQ_CNT_32);
9545 break;
9546 case 64:
9547 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9548 LPFC_MQ_CNT_64);
9549 break;
9550 case 128:
9551 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9552 LPFC_MQ_CNT_128);
9553 break;
9554 }
9555 list_for_each_entry(dmabuf, &mq->page_list, list) {
9556 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9557 putPaddrLow(dmabuf->phys);
9558 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9559 putPaddrHigh(dmabuf->phys);
9560 }
9561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9562 /* The IOCTL status is embedded in the mailbox subheader. */
9563 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9566 if (shdr_status || shdr_add_status || rc) {
9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9568 "2502 MQ_CREATE mailbox failed with "
9569 "status x%x add_status x%x, mbx status x%x\n",
9570 shdr_status, shdr_add_status, rc);
9571 status = -ENXIO;
9572 goto out;
9573 }
9574 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9575 if (mq->queue_id == 0xFFFF) {
9576 status = -ENXIO;
9577 goto out;
9578 }
9579 mq->type = LPFC_MQ;
9580 mq->subtype = subtype;
9581 mq->host_index = 0;
9582 mq->hba_index = 0;
9583
9584 /* link the mq onto the parent cq child list */
9585 list_add_tail(&mq->list, &cq->child_list);
9586out:
9587 if (rc != MBX_TIMEOUT)
9588 mempool_free(mbox, phba->mbox_mem_pool);
9589 return status;
9590}
9591
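/*
 * Editor's illustrative sketch (not part of the merged driver source):
 * pairing the SLI-4 mailbox queue with its completion queue.  LPFC_MBOX is
 * assumed to be the mailbox subtype value in this tree; the queues are
 * assumed to have been allocated and sized beforehand.
 */
static int example_setup_mbox_mq(struct lpfc_hba *phba, struct lpfc_queue *mq,
				 struct lpfc_queue *cq)
{
	/* MQ_CREATE fills mq->queue_id and links mq onto cq->child_list. */
	if (lpfc_mq_create(phba, mq, cq, LPFC_MBOX))
		return -ENXIO;
	return 0;
}
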
9592/**
9593 * lpfc_wq_create - Create a Work Queue on the HBA
9594 * @phba: HBA structure that indicates port to create a queue on.
9595 * @wq: The queue structure to use to create the work queue.
9596 * @cq: The completion queue to bind this work queue to.
9597 * @subtype: The subtype of the work queue indicating its functionality.
9598 *
9599 * This function creates a work queue, as detailed in @wq, on a port, described
9600 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9601 *
9602 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9603 * is used to get the entry count and entry size that are necessary to
9604 * determine the number of pages to allocate and use for this queue. The @cq
9605 * is used to indicate which completion queue to bind this work queue to. This
9606 * function will send the WQ_CREATE mailbox command to the HBA to setup the
9607 * work queue. This function is synchronous and will wait for the mailbox
9608 * command to complete before returning.
9609 *
9610 * On success this function will return a zero. If unable to allocate enough
9611 * memory this function will return ENOMEM. If the queue create mailbox command
9612 * fails this function will return ENXIO.
9613 **/
9614uint32_t
9615lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9616 struct lpfc_queue *cq, uint32_t subtype)
9617{
9618 struct lpfc_mbx_wq_create *wq_create;
9619 struct lpfc_dmabuf *dmabuf;
9620 LPFC_MBOXQ_t *mbox;
9621 int rc, length, status = 0;
9622 uint32_t shdr_status, shdr_add_status;
9623 union lpfc_sli4_cfg_shdr *shdr;
9624
9625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9626 if (!mbox)
9627 return -ENOMEM;
9628 length = (sizeof(struct lpfc_mbx_wq_create) -
9629 sizeof(struct lpfc_sli4_cfg_mhdr));
9630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9631 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9632 length, LPFC_SLI4_MBX_EMBED);
9633 wq_create = &mbox->u.mqe.un.wq_create;
9634 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9635 wq->page_count);
9636 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9637 cq->queue_id);
9638 list_for_each_entry(dmabuf, &wq->page_list, list) {
9639 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9640 putPaddrLow(dmabuf->phys);
9641 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9642 putPaddrHigh(dmabuf->phys);
9643 }
9644 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9645 /* The IOCTL status is embedded in the mailbox subheader. */
9646 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9649 if (shdr_status || shdr_add_status || rc) {
9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9651 "2503 WQ_CREATE mailbox failed with "
9652 "status x%x add_status x%x, mbx status x%x\n",
9653 shdr_status, shdr_add_status, rc);
9654 status = -ENXIO;
9655 goto out;
9656 }
9657 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9658 if (wq->queue_id == 0xFFFF) {
9659 status = -ENXIO;
9660 goto out;
9661 }
9662 wq->type = LPFC_WQ;
9663 wq->subtype = subtype;
9664 wq->host_index = 0;
9665 wq->hba_index = 0;
9666
9667 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list);
9669out:
9670 if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status;
9673}
9674
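/*
 * Editor's illustrative sketch (not part of the merged driver source):
 * creating a work queue whose completions are reported through @cq.
 * LPFC_FCP is an assumed subtype value for FCP traffic.
 */
static int example_setup_fcp_wq(struct lpfc_hba *phba, struct lpfc_queue *wq,
				struct lpfc_queue *cq)
{
	/* WQ_CREATE fills wq->queue_id and links wq onto cq->child_list. */
	if (lpfc_wq_create(phba, wq, cq, LPFC_FCP))
		return -ENXIO;
	return 0;
}
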
9675/**
9676 * lpfc_rq_create - Create a Receive Queue on the HBA
9677 * @phba: HBA structure that indicates port to create a queue on.
9678 * @hrq: The queue structure to use to create the header receive queue.
9679 * @drq: The queue structure to use to create the data receive queue.
9680 * @cq: The completion queue to bind this receive queue pair to.
9681 *
9682 * This function creates a receive buffer queue pair, as detailed in @hrq and
9683 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9684 * to the HBA.
9685 *
9686 * The @phba struct is used to send the mailbox command to the HBA. The @hrq and
9687 * @drq structs are used to get the entry count that is necessary to determine the
9688 * number of pages to use for this queue. The @cq is used to indicate which
9689 * completion queue to bind received buffers that are posted to these queues to.
9690 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
9691 * receive queue pair. This function is synchronous and will wait for the
9692 * mailbox command to complete before returning.
9693 *
9694 * On success this function will return a zero. If unable to allocate enough
9695 * memory this function will return ENOMEM. If the queue create mailbox command
9696 * fails this function will return ENXIO.
9697 **/
9698uint32_t
9699lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9700 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9701{
9702 struct lpfc_mbx_rq_create *rq_create;
9703 struct lpfc_dmabuf *dmabuf;
9704 LPFC_MBOXQ_t *mbox;
9705 int rc, length, status = 0;
9706 uint32_t shdr_status, shdr_add_status;
9707 union lpfc_sli4_cfg_shdr *shdr;
9708
9709 if (hrq->entry_count != drq->entry_count)
9710 return -EINVAL;
9711 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9712 if (!mbox)
9713 return -ENOMEM;
9714 length = (sizeof(struct lpfc_mbx_rq_create) -
9715 sizeof(struct lpfc_sli4_cfg_mhdr));
9716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9717 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9718 length, LPFC_SLI4_MBX_EMBED);
9719 rq_create = &mbox->u.mqe.un.rq_create;
9720 switch (hrq->entry_count) {
9721 default:
9722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9723 "2535 Unsupported RQ count. (%d)\n",
9724 hrq->entry_count);
9725 if (hrq->entry_count < 512)
9726 return -EINVAL;
9727 /* otherwise default to smallest count (drop through) */
9728 case 512:
9729 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9730 LPFC_RQ_RING_SIZE_512);
9731 break;
9732 case 1024:
9733 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9734 LPFC_RQ_RING_SIZE_1024);
9735 break;
9736 case 2048:
9737 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9738 LPFC_RQ_RING_SIZE_2048);
9739 break;
9740 case 4096:
9741 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9742 LPFC_RQ_RING_SIZE_4096);
9743 break;
9744 }
9745 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9746 cq->queue_id);
9747 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9748 hrq->page_count);
9749 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9750 LPFC_HDR_BUF_SIZE);
9751 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9752 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9753 putPaddrLow(dmabuf->phys);
9754 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9755 putPaddrHigh(dmabuf->phys);
9756 }
9757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9758 /* The IOCTL status is embedded in the mailbox subheader. */
9759 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9760 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9761 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9762 if (shdr_status || shdr_add_status || rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "2504 RQ_CREATE mailbox failed with "
9765 "status x%x add_status x%x, mbx status x%x\n",
9766 shdr_status, shdr_add_status, rc);
9767 status = -ENXIO;
9768 goto out;
9769 }
9770 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9771 if (hrq->queue_id == 0xFFFF) {
9772 status = -ENXIO;
9773 goto out;
9774 }
9775 hrq->type = LPFC_HRQ;
9776 hrq->subtype = subtype;
9777 hrq->host_index = 0;
9778 hrq->hba_index = 0;
9779
9780 /* now create the data queue */
9781 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9782 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9783 length, LPFC_SLI4_MBX_EMBED);
9784 switch (drq->entry_count) {
9785 default:
9786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9787 "2536 Unsupported RQ count. (%d)\n",
9788 drq->entry_count);
9789 if (drq->entry_count < 512)
9790 return -EINVAL;
9791 /* otherwise default to smallest count (drop through) */
9792 case 512:
9793 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9794 LPFC_RQ_RING_SIZE_512);
9795 break;
9796 case 1024:
9797 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9798 LPFC_RQ_RING_SIZE_1024);
9799 break;
9800 case 2048:
9801 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9802 LPFC_RQ_RING_SIZE_2048);
9803 break;
9804 case 4096:
9805 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9806 LPFC_RQ_RING_SIZE_4096);
9807 break;
9808 }
9809 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9810 cq->queue_id);
9811 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9812 drq->page_count);
9813 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9814 LPFC_DATA_BUF_SIZE);
9815 list_for_each_entry(dmabuf, &drq->page_list, list) {
9816 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9817 putPaddrLow(dmabuf->phys);
9818 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9819 putPaddrHigh(dmabuf->phys);
9820 }
9821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9822 /* The IOCTL status is embedded in the mailbox subheader. */
9823 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9826 if (shdr_status || shdr_add_status || rc) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9831 if (drq->queue_id == 0xFFFF) {
9832 status = -ENXIO;
9833 goto out;
9834 }
9835 drq->type = LPFC_DRQ;
9836 drq->subtype = subtype;
9837 drq->host_index = 0;
9838 drq->hba_index = 0;
9839
9840 /* link the header and data RQs onto the parent cq child list */
9841 list_add_tail(&hrq->list, &cq->child_list);
9842 list_add_tail(&drq->list, &cq->child_list);
9843
9844out:
9845 if (rc != MBX_TIMEOUT)
9846 mempool_free(mbox, phba->mbox_mem_pool);
9847 return status;
9848}
9849
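/*
 * Editor's illustrative sketch (not part of the merged driver source): the
 * header and data receive queues are created as a pair and must have equal
 * entry counts or lpfc_rq_create() fails with -EINVAL.  LPFC_USOL is an
 * assumed subtype value for unsolicited receive traffic.
 */
static int example_setup_rq_pair(struct lpfc_hba *phba,
				 struct lpfc_queue *hrq,
				 struct lpfc_queue *drq,
				 struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	/* RQ_CREATE links both queues onto cq->child_list on success. */
	if (lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL))
		return -ENXIO;
	return 0;
}
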
9850/**
9851 * lpfc_eq_destroy - Destroy an event Queue on the HBA
9852 * @eq: The queue structure associated with the queue to destroy.
9853 *
9854 * This function destroys a queue, as detailed in @eq, by sending a mailbox
9855 * command, specific to the type of queue, to the HBA.
9856 *
9857 * The @eq struct is used to get the queue ID of the queue to destroy.
9858 *
9859 * On success this function will return a zero. If the queue destroy mailbox
9860 * command fails this function will return ENXIO.
9861 **/
9862uint32_t
9863lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9864{
9865 LPFC_MBOXQ_t *mbox;
9866 int rc, length, status = 0;
9867 uint32_t shdr_status, shdr_add_status;
9868 union lpfc_sli4_cfg_shdr *shdr;
9869
9870 if (!eq)
9871 return -ENODEV;
9872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
9873 if (!mbox)
9874 return -ENOMEM;
9875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
9876 sizeof(struct lpfc_sli4_cfg_mhdr));
9877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9878 LPFC_MBOX_OPCODE_EQ_DESTROY,
9879 length, LPFC_SLI4_MBX_EMBED);
9880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
9881 eq->queue_id);
9882 mbox->vport = eq->phba->pport;
9883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9884
9885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *)
9888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
9889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9891 if (shdr_status || shdr_add_status || rc) {
9892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9893 "2505 EQ_DESTROY mailbox failed with "
9894 "status x%x add_status x%x, mbx status x%x\n",
9895 shdr_status, shdr_add_status, rc);
9896 status = -ENXIO;
9897 }
9898
9899 /* Remove eq from any list */
9900 list_del_init(&eq->list);
9901 if (rc != MBX_TIMEOUT)
9902 mempool_free(mbox, eq->phba->mbox_mem_pool);
9903 return status;
9904}
9905
9906/**
9907 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
9908 * @cq: The queue structure associated with the queue to destroy.
9909 *
9910 * This function destroys a queue, as detailed in @cq, by sending a mailbox
9911 * command, specific to the type of queue, to the HBA.
9912 *
9913 * The @cq struct is used to get the queue ID of the queue to destroy.
9914 *
9915 * On success this function will return a zero. If the queue destroy mailbox
9916 * command fails this function will return ENXIO.
9917 **/
9918uint32_t
9919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
9920{
9921 LPFC_MBOXQ_t *mbox;
9922 int rc, length, status = 0;
9923 uint32_t shdr_status, shdr_add_status;
9924 union lpfc_sli4_cfg_shdr *shdr;
9925
9926 if (!cq)
9927 return -ENODEV;
9928 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
9929 if (!mbox)
9930 return -ENOMEM;
9931 length = (sizeof(struct lpfc_mbx_cq_destroy) -
9932 sizeof(struct lpfc_sli4_cfg_mhdr));
9933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9934 LPFC_MBOX_OPCODE_CQ_DESTROY,
9935 length, LPFC_SLI4_MBX_EMBED);
9936 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
9937 cq->queue_id);
9938 mbox->vport = cq->phba->pport;
9939 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9940 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
9941 /* The IOCTL status is embedded in the mailbox subheader. */
9942 shdr = (union lpfc_sli4_cfg_shdr *)
9943 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
9944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9946 if (shdr_status || shdr_add_status || rc) {
9947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9948 "2506 CQ_DESTROY mailbox failed with "
9949 "status x%x add_status x%x, mbx status x%x\n",
9950 shdr_status, shdr_add_status, rc);
9951 status = -ENXIO;
9952 }
9953 /* Remove cq from any list */
9954 list_del_init(&cq->list);
9955 if (rc != MBX_TIMEOUT)
9956 mempool_free(mbox, cq->phba->mbox_mem_pool);
9957 return status;
9958}
9959
9960/**
9961 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
9962 * @mq: The queue structure associated with the queue to destroy.
9963 *
9964 * This function destroys a queue, as detailed in @mq, by sending a mailbox
9965 * command, specific to the type of queue, to the HBA.
9966 *
9967 * The @mq struct is used to get the queue ID of the queue to destroy.
9968 *
9969 * On success this function will return a zero. If the queue destroy mailbox
9970 * command fails this function will return ENXIO.
9971 **/
9972uint32_t
9973lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
9974{
9975 LPFC_MBOXQ_t *mbox;
9976 int rc, length, status = 0;
9977 uint32_t shdr_status, shdr_add_status;
9978 union lpfc_sli4_cfg_shdr *shdr;
9979
9980 if (!mq)
9981 return -ENODEV;
9982 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
9983 if (!mbox)
9984 return -ENOMEM;
9985 length = (sizeof(struct lpfc_mbx_mq_destroy) -
9986 sizeof(struct lpfc_sli4_cfg_mhdr));
9987 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9988 LPFC_MBOX_OPCODE_MQ_DESTROY,
9989 length, LPFC_SLI4_MBX_EMBED);
9990 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
9991 mq->queue_id);
9992 mbox->vport = mq->phba->pport;
9993 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9994 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
9995 /* The IOCTL status is embedded in the mailbox subheader. */
9996 shdr = (union lpfc_sli4_cfg_shdr *)
9997 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
9998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10000 if (shdr_status || shdr_add_status || rc) {
10001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10002 "2507 MQ_DESTROY mailbox failed with "
10003 "status x%x add_status x%x, mbx status x%x\n",
10004 shdr_status, shdr_add_status, rc);
10005 status = -ENXIO;
10006 }
10007 /* Remove mq from any list */
10008 list_del_init(&mq->list);
10009 if (rc != MBX_TIMEOUT)
10010 mempool_free(mbox, mq->phba->mbox_mem_pool);
10011 return status;
10012}
10013
10014/**
10015 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
10016 * @wq: The queue structure associated with the queue to destroy.
10017 *
10018 * This function destroys a queue, as detailed in @wq, by sending a mailbox
10019 * command, specific to the type of queue, to the HBA.
10020 *
10021 * The @wq struct is used to get the queue ID of the queue to destroy.
10022 *
10023 * On success this function will return a zero. If the queue destroy mailbox
10024 * command fails this function will return ENXIO.
10025 **/
10026uint32_t
10027lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10028{
10029 LPFC_MBOXQ_t *mbox;
10030 int rc, length, status = 0;
10031 uint32_t shdr_status, shdr_add_status;
10032 union lpfc_sli4_cfg_shdr *shdr;
10033
10034 if (!wq)
10035 return -ENODEV;
10036 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10037 if (!mbox)
10038 return -ENOMEM;
10039 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10040 sizeof(struct lpfc_sli4_cfg_mhdr));
10041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10042 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10043 length, LPFC_SLI4_MBX_EMBED);
10044 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10045 wq->queue_id);
10046 mbox->vport = wq->phba->pport;
10047 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10048 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10049 shdr = (union lpfc_sli4_cfg_shdr *)
10050 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10053 if (shdr_status || shdr_add_status || rc) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055 "2508 WQ_DESTROY mailbox failed with "
10056 "status x%x add_status x%x, mbx status x%x\n",
10057 shdr_status, shdr_add_status, rc);
10058 status = -ENXIO;
10059 }
10060 /* Remove wq from any list */
10061 list_del_init(&wq->list);
10062 if (rc != MBX_TIMEOUT)
10063 mempool_free(mbox, wq->phba->mbox_mem_pool);
10064 return status;
10065}
10066
10067/**
10068 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
10069 * @hrq: The header receive queue to destroy, together with its paired data receive queue @drq.
10070 *
10071 * This function destroys a receive queue pair, as detailed in @hrq and @drq, by
10072 * sending a mailbox command, specific to the type of queue, to the HBA.
10073 *
10074 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
10075 *
10076 * On success this function will return a zero. If the queue destroy mailbox
10077 * command fails this function will return ENXIO.
10078 **/
10079uint32_t
10080lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10081 struct lpfc_queue *drq)
10082{
10083 LPFC_MBOXQ_t *mbox;
10084 int rc, length, status = 0;
10085 uint32_t shdr_status, shdr_add_status;
10086 union lpfc_sli4_cfg_shdr *shdr;
10087
10088 if (!hrq || !drq)
10089 return -ENODEV;
10090 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10091 if (!mbox)
10092 return -ENOMEM;
10093 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10094 sizeof(struct mbox_header));
10095 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10096 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10097 length, LPFC_SLI4_MBX_EMBED);
10098 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10099 hrq->queue_id);
10100 mbox->vport = hrq->phba->pport;
10101 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10102 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10103 /* The IOCTL status is embedded in the mailbox subheader. */
10104 shdr = (union lpfc_sli4_cfg_shdr *)
10105 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10106 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10107 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10108 if (shdr_status || shdr_add_status || rc) {
10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10110 "2509 RQ_DESTROY mailbox failed with "
10111 "status x%x add_status x%x, mbx status x%x\n",
10112 shdr_status, shdr_add_status, rc);
10113 if (rc != MBX_TIMEOUT)
10114 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10115 return -ENXIO;
10116 }
10117 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10118 drq->queue_id);
10119 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10120 shdr = (union lpfc_sli4_cfg_shdr *)
10121 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10122 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10123 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10124 if (shdr_status || shdr_add_status || rc) {
10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10126 "2510 RQ_DESTROY mailbox failed with "
10127 "status x%x add_status x%x, mbx status x%x\n",
10128 shdr_status, shdr_add_status, rc);
10129 status = -ENXIO;
10130 }
10131 list_del_init(&hrq->list);
10132 list_del_init(&drq->list);
10133 if (rc != MBX_TIMEOUT)
10134 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10135 return status;
10136}
10137
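/*
 * Editor's illustrative sketch (not part of the merged driver source):
 * queues are torn down child-first, so work/receive/mailbox queues are
 * destroyed before their completion queues, and completion queues before
 * their parent event queue.  The queue pointers are assumed to be the ones
 * created earlier during setup.
 */
static void example_teardown_queues(struct lpfc_hba *phba,
				    struct lpfc_queue *wq,
				    struct lpfc_queue *hrq,
				    struct lpfc_queue *drq,
				    struct lpfc_queue *mq,
				    struct lpfc_queue *cq,
				    struct lpfc_queue *eq)
{
	lpfc_wq_destroy(phba, wq);
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_mq_destroy(phba, mq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}
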
10138/**
10139 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10140 * @phba: The HBA structure for which this call is being executed.
10141 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10142 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10143 * @xritag: the xritag that ties this io to the SGL pages.
10144 *
10145 * This routine will post the sgl pages for the IO that has the xritag
10146 * that is in the iocbq structure. The xritag is assigned during iocbq
10147 * creation and persists for as long as the driver is loaded.
10148 * If the caller has fewer than 256 scatter gather segments to map, then
10149 * pdma_phys_addr1 should be 0.
10150 * If the caller needs to map more than 256 scatter gather segments, then
10151 * pdma_phys_addr1 should be a valid physical address.
10152 * Physical addresses for SGLs must be 64-byte aligned.
10153 * If two SGLs are mapped, the first one must have 256 entries and
10154 * the second SGL can have between 1 and 256 entries.
10155 *
10156 * Return codes:
10157 * 0 - Success
10158 * -ENXIO, -ENOMEM - Failure
10159 **/
10160int
10161lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10162 dma_addr_t pdma_phys_addr0,
10163 dma_addr_t pdma_phys_addr1,
10164 uint16_t xritag)
10165{
10166 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10167 LPFC_MBOXQ_t *mbox;
10168 int rc;
10169 uint32_t shdr_status, shdr_add_status;
10170 union lpfc_sli4_cfg_shdr *shdr;
10171
10172 if (xritag == NO_XRI) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10174 "0364 Invalid param:\n");
10175 return -EINVAL;
10176 }
10177
10178 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10179 if (!mbox)
10180 return -ENOMEM;
10181
10182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10183 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10184 sizeof(struct lpfc_mbx_post_sgl_pages) -
10185 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10186
10187 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10188 &mbox->u.mqe.un.post_sgl_pages;
10189 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10190 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10191
10192 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10193 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10194 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10195 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10196
10197 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10198 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10199 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10200 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10201 if (!phba->sli4_hba.intr_enable)
10202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10203 else
10204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10205 /* The IOCTL status is embedded in the mailbox subheader. */
10206 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10209 if (rc != MBX_TIMEOUT)
10210 mempool_free(mbox, phba->mbox_mem_pool);
10211 if (shdr_status || shdr_add_status || rc) {
10212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10213 "2511 POST_SGL mailbox failed with "
10214 "status x%x add_status x%x, mbx status x%x\n",
10215 shdr_status, shdr_add_status, rc);
10216 rc = -ENXIO;
10217 }
10218 return rc;
10219}
10220/**
10221 * lpfc_sli4_remove_all_sgl_pages - Remove all scatter gather list pages from the HBA
10222 * @phba: The HBA structure for which this call is being executed.
10223 *
10224 * This routine will remove all of the sgl pages registered with the hba.
10225 *
10226 * Return codes:
10227 * 0 - Success
10228 * -ENXIO, -ENOMEM - Failure
10229 **/
10230int
10231lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10232{
10233 LPFC_MBOXQ_t *mbox;
10234 int rc;
10235 uint32_t shdr_status, shdr_add_status;
10236 union lpfc_sli4_cfg_shdr *shdr;
10237
10238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10239 if (!mbox)
10240 return -ENOMEM;
10241
10242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10243 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10244 LPFC_SLI4_MBX_EMBED);
10245 if (!phba->sli4_hba.intr_enable)
10246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10247 else
10248 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10249 /* The IOCTL status is embedded in the mailbox subheader. */
10250 shdr = (union lpfc_sli4_cfg_shdr *)
10251 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10254 if (rc != MBX_TIMEOUT)
10255 mempool_free(mbox, phba->mbox_mem_pool);
10256 if (shdr_status || shdr_add_status || rc) {
10257 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10258 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10259 "status x%x add_status x%x, mbx status x%x\n",
10260 shdr_status, shdr_add_status, rc);
10261 rc = -ENXIO;
10262 }
10263 return rc;
10264}
10265
10266/**
10267 * lpfc_sli4_next_xritag - Get an xritag for the io
10268 * @phba: Pointer to HBA context object.
10269 *
10270 * This function gets an xritag for the iocb. The function returns the
10271 * allocated xritag if successful; if there is no unused xritag it will
10272 * return 0xFFFF (NO_XRI), which is not a valid xritag.
10273 *
10274 * The caller is not required to hold any lock.
10275 **/
10276uint16_t
10277lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10278{
10279 uint16_t xritag;
10280
10281 spin_lock_irq(&phba->hbalock);
10282 xritag = phba->sli4_hba.next_xri;
10283 if ((xritag != (uint16_t) -1) && xritag <
10284 (phba->sli4_hba.max_cfg_param.max_xri
10285 + phba->sli4_hba.max_cfg_param.xri_base)) {
10286 phba->sli4_hba.next_xri++;
10287 phba->sli4_hba.max_cfg_param.xri_used++;
10288 spin_unlock_irq(&phba->hbalock);
10289 return xritag;
10290 }
10291 spin_unlock_irq(&phba->hbalock);
10292
10293 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10294 "2004 Failed to allocate XRI.last XRITAG is %d"
10295 " Max XRI is %d, Used XRI is %d\n",
10296 phba->sli4_hba.next_xri,
10297 phba->sli4_hba.max_cfg_param.max_xri,
10298 phba->sli4_hba.max_cfg_param.xri_used);
10299 return -1;
10300}
10301
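/*
 * Editor's illustrative sketch (not part of the merged driver source):
 * reserving an XRI and posting a single SGL page pair for it.  @sgl_phys is
 * assumed to be the 64-byte-aligned physical address of a prepared SGL
 * page; no second page is used, so 0 is passed for pdma_phys_addr1.  NO_XRI
 * is assumed to be the 0xFFFF sentinel returned when the XRI space is
 * exhausted.
 */
static int example_post_one_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys)
{
	uint16_t xritag;

	xritag = lpfc_sli4_next_xritag(phba);
	if (xritag == NO_XRI)
		return -ENOMEM;
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
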
10302/**
10303 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10304 * @phba: pointer to lpfc hba data structure.
10305 *
10306 * This routine is invoked to post a block of driver's sgl pages to the
10307 * HBA using non-embedded mailbox command. No Lock is held. This routine
10308 * is only called when the driver is loading and after all IO has been
10309 * stopped.
10310 **/
10311int
10312lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10313{
10314 struct lpfc_sglq *sglq_entry;
10315 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10316 struct sgl_page_pairs *sgl_pg_pairs;
10317 void *viraddr;
10318 LPFC_MBOXQ_t *mbox;
10319 uint32_t reqlen, alloclen, pg_pairs;
10320 uint32_t mbox_tmo;
10321 uint16_t xritag_start = 0;
10322 int els_xri_cnt, rc = 0;
10323 uint32_t shdr_status, shdr_add_status;
10324 union lpfc_sli4_cfg_shdr *shdr;
10325
10326 /* The number of sgls to be posted */
10327 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10328
10329 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10330 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10331 if (reqlen > PAGE_SIZE) {
10332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10333 "2559 Block sgl registration required DMA "
10334 "size (%d) great than a page\n", reqlen);
10335 return -ENOMEM;
10336 }
10337 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10338 if (!mbox) {
10339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10340 "2560 Failed to allocate mbox cmd memory\n");
10341 return -ENOMEM;
10342 }
10343
10344 /* Allocate DMA memory and set up the non-embedded mailbox command */
10345 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10346 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10347 LPFC_SLI4_MBX_NEMBED);
10348
10349 if (alloclen < reqlen) {
10350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10351 "0285 Allocated DMA memory size (%d) is "
10352 "less than the requested DMA memory "
10353 "size (%d)\n", alloclen, reqlen);
10354 lpfc_sli4_mbox_cmd_free(phba, mbox);
10355 return -ENOMEM;
10356 }
10357
10358 /* Get the first SGE entry from the non-embedded DMA memory */
10359 if (unlikely(!mbox->sge_array)) {
10360 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10361 "2525 Failed to get the non-embedded SGE "
10362 "virtual address\n");
10363 lpfc_sli4_mbox_cmd_free(phba, mbox);
10364 return -ENOMEM;
10365 }
10366 viraddr = mbox->sge_array->addr[0];
10367
10368 /* Set up the SGL pages in the non-embedded DMA pages */
10369 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10370 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10371
10372 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10373 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10374 /* Set up the sge entry */
10375 sgl_pg_pairs->sgl_pg0_addr_lo =
10376 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10377 sgl_pg_pairs->sgl_pg0_addr_hi =
10378 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10379 sgl_pg_pairs->sgl_pg1_addr_lo =
10380 cpu_to_le32(putPaddrLow(0));
10381 sgl_pg_pairs->sgl_pg1_addr_hi =
10382 cpu_to_le32(putPaddrHigh(0));
10383 /* Keep the first xritag on the list */
10384 if (pg_pairs == 0)
10385 xritag_start = sglq_entry->sli4_xritag;
10386 sgl_pg_pairs++;
10387 }
10388 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10389 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10390 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10391 /* Perform endian conversion if necessary */
10392 sgl->word0 = cpu_to_le32(sgl->word0);
10393
10394 if (!phba->sli4_hba.intr_enable)
10395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10396 else {
10397 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10398 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10399 }
10400 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10403 if (rc != MBX_TIMEOUT)
10404 lpfc_sli4_mbox_cmd_free(phba, mbox);
10405 if (shdr_status || shdr_add_status || rc) {
10406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10407 "2513 POST_SGL_BLOCK mailbox command failed "
10408 "status x%x add_status x%x mbx status x%x\n",
10409 shdr_status, shdr_add_status, rc);
10410 rc = -ENXIO;
10411 }
10412 return rc;
10413}
10414
10415/**
10416 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10417 * @phba: pointer to lpfc hba data structure.
10418 * @sblist: pointer to scsi buffer list.
10419 * @cnt: number of scsi buffers on the list.
10420 *
10421 * This routine is invoked to post a block of @count scsi sgl pages from a
10422 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10423 * No Lock is held.
10424 *
10425 **/
10426int
10427lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10428 int cnt)
10429{
10430 struct lpfc_scsi_buf *psb;
10431 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10432 struct sgl_page_pairs *sgl_pg_pairs;
10433 void *viraddr;
10434 LPFC_MBOXQ_t *mbox;
10435 uint32_t reqlen, alloclen, pg_pairs;
10436 uint32_t mbox_tmo;
10437 uint16_t xritag_start = 0;
10438 int rc = 0;
10439 uint32_t shdr_status, shdr_add_status;
10440 dma_addr_t pdma_phys_bpl1;
10441 union lpfc_sli4_cfg_shdr *shdr;
10442
10443 /* Calculate the requested length of the dma memory */
10444 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10445 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10446 if (reqlen > PAGE_SIZE) {
10447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10448 "0217 Block sgl registration required DMA "
10449 "size (%d) great than a page\n", reqlen);
10450 return -ENOMEM;
10451 }
10452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10453 if (!mbox) {
10454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10455 "0283 Failed to allocate mbox cmd memory\n");
10456 return -ENOMEM;
10457 }
10458
10459 /* Allocate DMA memory and set up the non-embedded mailbox command */
10460 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10461 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10462 LPFC_SLI4_MBX_NEMBED);
10463
10464 if (alloclen < reqlen) {
10465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10466 "2561 Allocated DMA memory size (%d) is "
10467 "less than the requested DMA memory "
10468 "size (%d)\n", alloclen, reqlen);
10469 lpfc_sli4_mbox_cmd_free(phba, mbox);
10470 return -ENOMEM;
10471 }
10472
10473 /* Get the first SGE entry from the non-embedded DMA memory */
10474 if (unlikely(!mbox->sge_array)) {
10475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10476 "2565 Failed to get the non-embedded SGE "
10477 "virtual address\n");
10478 lpfc_sli4_mbox_cmd_free(phba, mbox);
10479 return -ENOMEM;
10480 }
10481 viraddr = mbox->sge_array->addr[0];
10482
10483 /* Set up the SGL pages in the non-embedded DMA pages */
10484 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10485 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10486
10487 pg_pairs = 0;
10488 list_for_each_entry(psb, sblist, list) {
10489 /* Set up the sge entry */
10490 sgl_pg_pairs->sgl_pg0_addr_lo =
10491 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10492 sgl_pg_pairs->sgl_pg0_addr_hi =
10493 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10494 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10495 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10496 else
10497 pdma_phys_bpl1 = 0;
10498 sgl_pg_pairs->sgl_pg1_addr_lo =
10499 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10500 sgl_pg_pairs->sgl_pg1_addr_hi =
10501 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10502 /* Keep the first xritag on the list */
10503 if (pg_pairs == 0)
10504 xritag_start = psb->cur_iocbq.sli4_xritag;
10505 sgl_pg_pairs++;
10506 pg_pairs++;
10507 }
10508 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10509 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10510 /* Perform endian conversion if necessary */
10511 sgl->word0 = cpu_to_le32(sgl->word0);
10512
10513 if (!phba->sli4_hba.intr_enable)
10514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10515 else {
10516 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10517 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10518 }
10519 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10520 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10522 if (rc != MBX_TIMEOUT)
10523 lpfc_sli4_mbox_cmd_free(phba, mbox);
10524 if (shdr_status || shdr_add_status || rc) {
10525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10526 "2564 POST_SGL_BLOCK mailbox command failed "
10527 "status x%x add_status x%x mbx status x%x\n",
10528 shdr_status, shdr_add_status, rc);
10529 rc = -ENXIO;
10530 }
10531 return rc;
10532}
10533
10534/**
10535 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10536 * @phba: pointer to lpfc_hba struct that the frame was received on
10537 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10538 *
10539 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10540 * valid type of frame that the LPFC driver will handle. This function will
10541 * return a zero if the frame is a valid frame or a non zero value when the
10542 * frame does not pass the check.
10543 **/
10544static int
10545lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10546{
10547 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10548 char *type_names[] = FC_TYPE_NAMES_INIT;
10549 struct fc_vft_header *fc_vft_hdr;
10550
10551 switch (fc_hdr->fh_r_ctl) {
10552 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10553 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10554 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10555 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10556 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10557 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10558 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10559 case FC_RCTL_DD_CMD_STATUS: /* command status */
10560 case FC_RCTL_ELS_REQ: /* extended link services request */
10561 case FC_RCTL_ELS_REP: /* extended link services reply */
10562 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10563 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10564 case FC_RCTL_BA_NOP: /* basic link service NOP */
10565 case FC_RCTL_BA_ABTS: /* basic link service abort */
10566 case FC_RCTL_BA_RMC: /* remove connection */
10567 case FC_RCTL_BA_ACC: /* basic accept */
10568 case FC_RCTL_BA_RJT: /* basic reject */
10569 case FC_RCTL_BA_PRMT:
10570 case FC_RCTL_ACK_1: /* acknowledge_1 */
10571 case FC_RCTL_ACK_0: /* acknowledge_0 */
10572 case FC_RCTL_P_RJT: /* port reject */
10573 case FC_RCTL_F_RJT: /* fabric reject */
10574 case FC_RCTL_P_BSY: /* port busy */
10575 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10576 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10577 case FC_RCTL_LCR: /* link credit reset */
10578 case FC_RCTL_END: /* end */
10579 break;
10580 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10581 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10582 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10583 return lpfc_fc_frame_check(phba, fc_hdr);
10584 default:
10585 goto drop;
10586 }
10587 switch (fc_hdr->fh_type) {
10588 case FC_TYPE_BLS:
10589 case FC_TYPE_ELS:
10590 case FC_TYPE_FCP:
10591 case FC_TYPE_CT:
10592 break;
10593 case FC_TYPE_IP:
10594 case FC_TYPE_ILS:
10595 default:
10596 goto drop;
10597 }
10598 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10599 "2538 Received frame rctl:%s type:%s\n",
10600 rctl_names[fc_hdr->fh_r_ctl],
10601 type_names[fc_hdr->fh_type]);
10602 return 0;
10603drop:
10604 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10605 "2539 Dropped frame rctl:%s type:%s\n",
10606 rctl_names[fc_hdr->fh_r_ctl],
10607 type_names[fc_hdr->fh_type]);
10608 return 1;
10609}
10610
10611/**
10612 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10613 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10614 *
10615 * This function processes the FC header to retrieve the VFI from the VF
10616 * header, if one exists. This function will return the VFI if one exists
10617 * or 0 if no VSAN Header exists.
10618 **/
10619static uint32_t
10620lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10621{
10622 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10623
10624 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10625 return 0;
10626 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10627}
10628
10629/**
10630 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10631 * @phba: Pointer to the HBA structure to search for the vport on
10632 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10633 * @fcfi: The FC Fabric ID that the frame came from
10634 *
10635 * This function searches the @phba for a vport that matches the content of the
10636 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10637 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10638 * returns the matching vport pointer or NULL if unable to match frame to a
10639 * vport.
10640 **/
10641static struct lpfc_vport *
10642lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10643 uint16_t fcfi)
10644{
10645 struct lpfc_vport **vports;
10646 struct lpfc_vport *vport = NULL;
10647 int i;
10648 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10649 fc_hdr->fh_d_id[1] << 8 |
10650 fc_hdr->fh_d_id[2]);
10651
10652 vports = lpfc_create_vport_work_array(phba);
10653 if (vports != NULL)
10654 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10655 if (phba->fcf.fcfi == fcfi &&
10656 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10657 vports[i]->fc_myDID == did) {
10658 vport = vports[i];
10659 break;
10660 }
10661 }
10662 lpfc_destroy_vport_work_array(phba, vports);
10663 return vport;
10664}
10665
10666/**
10667 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10668 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10669 *
10670 * This function searches through the existing incomplete sequences that have
10671 * been sent to this @vport. If the frame matches one of the incomplete
10672 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10673 * make up that sequence. If no sequence is found that matches this frame then
10674 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10675 * This function returns a pointer to the first dmabuf in the sequence list that
10676 * the frame was linked to.
10677 **/
10678static struct hbq_dmabuf *
10679lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10680{
10681 struct fc_frame_header *new_hdr;
10682 struct fc_frame_header *temp_hdr;
10683 struct lpfc_dmabuf *d_buf;
10684 struct lpfc_dmabuf *h_buf;
10685 struct hbq_dmabuf *seq_dmabuf = NULL;
10686 struct hbq_dmabuf *temp_dmabuf = NULL;
10687
10688 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10689 /* Use the hdr_buf to find the sequence that this frame belongs to */
10690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10695 continue;
10696 /* found a pending sequence that matches this frame */
10697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10698 break;
10699 }
10700 if (!seq_dmabuf) {
10701 /*
10702 * This indicates first frame received for this sequence.
10703 * Queue the buffer on the vport's rcv_buffer_list.
10704 */
10705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10706 return dmabuf;
10707 }
10708 temp_hdr = seq_dmabuf->hbuf.virt;
10709 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10710 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10711 return dmabuf;
10712 }
10713 /* find the correct place in the sequence to insert this frame */
10714 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10715 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10716 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10717 /*
10718 * If the frame's sequence count is greater than the frame on
10719 * the list then insert the frame right after this frame
10720 */
10721 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10722 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10723 return seq_dmabuf;
10724 }
10725 }
10726 return NULL;
10727}
10728
10729/**
10730 * lpfc_seq_complete - Indicates if a sequence is complete
10731 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10732 *
10733 * This function checks the sequence, starting with the frame described by
10734 * @dmabuf, to see if all the frames associated with this sequence are present.
10735 * The frames associated with this sequence are linked to the @dmabuf using the
10736 * dbuf list. This function looks for three major things. 1) That the first frame
10737 * has a sequence count of zero. 2) That there is a frame with the last-frame-of-
10738 * sequence bit set. 3) That there are no holes in the sequence count. The function will
10739 * return 1 when the sequence is complete, otherwise it will return 0.
10740 **/
10741static int
10742lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10743{
10744 struct fc_frame_header *hdr;
10745 struct lpfc_dmabuf *d_buf;
10746 struct hbq_dmabuf *seq_dmabuf;
10747 uint32_t fctl;
10748 int seq_count = 0;
10749
10750 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10751 /* make sure first frame of sequence has a sequence count of zero */
10752 if (hdr->fh_seq_cnt != seq_count)
10753 return 0;
10754 fctl = (hdr->fh_f_ctl[0] << 16 |
10755 hdr->fh_f_ctl[1] << 8 |
10756 hdr->fh_f_ctl[2]);
10757 /* If last frame of sequence we can return success. */
10758 if (fctl & FC_FC_END_SEQ)
10759 return 1;
10760 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10761 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10762 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10763 /* If there is a hole in the sequence count then fail. */
10764 if (++seq_count != hdr->fh_seq_cnt)
10765 return 0;
10766 fctl = (hdr->fh_f_ctl[0] << 16 |
10767 hdr->fh_f_ctl[1] << 8 |
10768 hdr->fh_f_ctl[2]);
10769 /* If last frame of sequence we can return success. */
10770 if (fctl & FC_FC_END_SEQ)
10771 return 1;
10772 }
10773 return 0;
10774}
10775
10776/**
10777 * lpfc_prep_seq - Prep sequence for ULP processing
10778 * @vport: Pointer to the vport on which this sequence was received
10779 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
10780 *
10781 * This function takes a sequence, described by a list of frames, and creates
10782 * a list of iocbq structures to describe the sequence. This iocbq list will be
10783 * used to issue to the generic unsolicited sequence handler. This routine
10784 * returns a pointer to the first iocbq in the list. If the function is unable
10785 * to allocate an iocbq then it throws out the received frames that were not
10786 * able to be described and returns a pointer to the first iocbq. If unable to
10787 * allocate any iocbqs (including the first) this function will return NULL.
10788 **/
10789static struct lpfc_iocbq *
10790lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10791{
10792 struct lpfc_dmabuf *d_buf, *n_buf;
10793 struct lpfc_iocbq *first_iocbq, *iocbq;
10794 struct fc_frame_header *fc_hdr;
10795 uint32_t sid;
10796
10797 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10798 /* remove from receive buffer list */
10799 list_del_init(&seq_dmabuf->hbuf.list);
10800 /* get the Remote Port's SID */
10801 sid = (fc_hdr->fh_s_id[0] << 16 |
10802 fc_hdr->fh_s_id[1] << 8 |
10803 fc_hdr->fh_s_id[2]);
10804 /* Get an iocbq struct to fill in. */
10805 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10806 if (first_iocbq) {
10807 /* Initialize the first IOCB. */
10808 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10809 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10810 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10811 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10812 vport->vpi + vport->phba->vpi_base;
10813 /* put the first buffer into the first IOCBq */
10814 first_iocbq->context2 = &seq_dmabuf->dbuf;
10815 first_iocbq->context3 = NULL;
10816 first_iocbq->iocb.ulpBdeCount = 1;
10817 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10818 LPFC_DATA_BUF_SIZE;
10819 first_iocbq->iocb.un.rcvels.remoteID = sid;
10820 }
10821 iocbq = first_iocbq;
10822 /*
10823 * Each IOCBq can have two Buffers assigned, so go through the list
10824 * of buffers for this sequence and save two buffers in each IOCBq
10825 */
10826 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10827 if (!iocbq) {
10828 lpfc_in_buf_free(vport->phba, d_buf);
10829 continue;
10830 }
10831 if (!iocbq->context3) {
10832 iocbq->context3 = d_buf;
10833 iocbq->iocb.ulpBdeCount++;
10834 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10835 LPFC_DATA_BUF_SIZE;
10836 } else {
10837 iocbq = lpfc_sli_get_iocbq(vport->phba);
10838 if (!iocbq) {
10839 if (first_iocbq) {
10840 first_iocbq->iocb.ulpStatus =
10841 IOSTAT_FCP_RSP_ERROR;
10842 first_iocbq->iocb.un.ulpWord[4] =
10843 IOERR_NO_RESOURCES;
10844 }
10845 lpfc_in_buf_free(vport->phba, d_buf);
10846 continue;
10847 }
10848 iocbq->context2 = d_buf;
10849 iocbq->context3 = NULL;
10850 iocbq->iocb.ulpBdeCount = 1;
10851 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10852 LPFC_DATA_BUF_SIZE;
10853 iocbq->iocb.un.rcvels.remoteID = sid;
10854 list_add_tail(&iocbq->list, &first_iocbq->list);
10855 }
10856 }
10857 return first_iocbq;
10858}
10859
10860/**
10861 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10862 * @phba: Pointer to HBA context object.
10863 *
10864 * This function is called with no lock held. This function processes all
10865 * the received buffers and gives it to upper layers when a received buffer
10866 * indicates that it is the final frame in the sequence. The interrupt
10867 * service routine processes received buffers at interrupt contexts and adds
10868 * received dma buffers to the rb_pend_list queue and signals the worker thread.
10869 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10870 * appropriate receive function when the final frame in a sequence is received.
10871 **/
10872int
10873lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
10874{
10875 LIST_HEAD(cmplq);
10876 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
10877 struct fc_frame_header *fc_hdr;
10878 struct lpfc_vport *vport;
10879 uint32_t fcfi;
10880 struct lpfc_iocbq *iocbq;
10881
10882 /* Clear hba flag and get all received buffers into the cmplq */
10883 spin_lock_irq(&phba->hbalock);
10884 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
10885 list_splice_init(&phba->rb_pend_list, &cmplq);
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 /* Process each received buffer */
10889 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
10890 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10891 /* check to see if this a valid type of frame */
10892 if (lpfc_fc_frame_check(phba, fc_hdr)) {
10893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10894 continue;
10895 }
10896 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
10897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
10898 if (!vport) {
10899 /* throw out the frame */
10900 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10901 continue;
10902 }
10903 /* Link this frame */
10904 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
10905 if (!seq_dmabuf) {
10906 /* unable to add frame to vport - throw it out */
10907 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10908 continue;
10909 }
10910 /* If not last frame in sequence continue processing frames. */
10911 if (!lpfc_seq_complete(seq_dmabuf)) {
10912 /*
10913			 * When saving off frames, post a replacement buffer and
10914			 * mark this frame to be freed when it has been processed.
10915			 */
10916 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
10917 dmabuf->tag = -1;
10918 continue;
10919 }
10920 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10921 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
10922 if (!lpfc_complete_unsol_iocb(phba,
10923 &phba->sli.ring[LPFC_ELS_RING],
10924 iocbq, fc_hdr->fh_r_ctl,
10925 fc_hdr->fh_type))
10926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10927 "2540 Ring %d handler: unexpected Rctl "
10928 "x%x Type x%x received\n",
10929 LPFC_ELS_RING,
10930 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
10931	}
10932 return 0;
10933}
10934
10935/**
10936 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
10937 * @phba: pointer to lpfc hba data structure.
10938 *
10939 * This routine is invoked to post rpi header templates to the
10940 * HBA consistent with the SLI-4 interface spec. This routine
10941 * posts a PAGE_SIZE memory region to the port to hold up to
10942 * PAGE_SIZE / 64 rpi context headers.
10943 *
10944 * This routine does not require any locks. Its use is expected
10945 * to be at driver load or reset recovery, when driver execution
10946 * is sequential.
10947 *
10948 * Return codes
10949 * 	0 - successful
10950 * EIO - The mailbox failed to complete successfully.
10951 * When this error occurs, the driver is not guaranteed
10952 * to have any rpi regions posted to the device and
10953 * must either attempt to repost the regions or take a
10954 * fatal error.
10955 **/
10956int
10957lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
10958{
10959 struct lpfc_rpi_hdr *rpi_page;
10960 uint32_t rc = 0;
10961
10962 /* Post all rpi memory regions to the port. */
10963 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
10964 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
10965 if (rc != MBX_SUCCESS) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10967 "2008 Error %d posting all rpi "
10968 "headers\n", rc);
10969 rc = -EIO;
10970 break;
10971 }
10972 }
10973
10974 return rc;
10975}
10976
10977/**
10978 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
10979 * @phba: pointer to lpfc hba data structure.
10980 * @rpi_page: pointer to the rpi memory region.
10981 *
10982 * This routine is invoked to post a single rpi header to the
10983 * HBA consistent with the SLI-4 interface spec. This memory region
10984 * maps up to 64 rpi context regions.
10985 *
10986 * Return codes
10987 * 	0 - successful
10988 * ENOMEM - No available memory
10989 * EIO - The mailbox failed to complete successfully.
10990 **/
10991int
10992lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
10993{
10994 LPFC_MBOXQ_t *mboxq;
10995 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
10996 uint32_t rc = 0;
10997 uint32_t mbox_tmo;
10998 uint32_t shdr_status, shdr_add_status;
10999 union lpfc_sli4_cfg_shdr *shdr;
11000
11001 /* The port is notified of the header region via a mailbox command. */
11002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11003 if (!mboxq) {
11004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11005 "2001 Unable to allocate memory for issuing "
11006 "SLI_CONFIG_SPECIAL mailbox command\n");
11007 return -ENOMEM;
11008 }
11009
11010 /* Post all rpi memory regions to the port. */
11011 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11012 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11013 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11014 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11015 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11016 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11017 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11018 hdr_tmpl, rpi_page->page_count);
11019 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11020 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable)
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11030 if (rc != MBX_TIMEOUT)
11031 mempool_free(mboxq, phba->mbox_mem_pool);
11032 if (shdr_status || shdr_add_status || rc) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11034 "2514 POST_RPI_HDR mailbox failed with "
11035 "status x%x add_status x%x, mbx status x%x\n",
11036 shdr_status, shdr_add_status, rc);
11037 rc = -ENXIO;
11038 }
11039 return rc;
11040}
11041
11042/**
11043 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11044 * @phba: pointer to lpfc hba data structure.
11045 *
11046 * This routine is invoked to allocate an available rpi from the
11047 * driver's rpi bitmask for the port. If the number of remaining
11048 * rpis drops below the low water mark, another rpi header region
11049 * is created and posted to the port.
11050 *
11051 * Returns
11052 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11053 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11054 **/
11055int
11056lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11057{
11058 int rpi;
11059 uint16_t max_rpi, rpi_base, rpi_limit;
11060 uint16_t rpi_remaining;
11061 struct lpfc_rpi_hdr *rpi_hdr;
11062
11063 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11064 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11065 rpi_limit = phba->sli4_hba.next_rpi;
11066
11067 /*
11068 * The valid rpi range is not guaranteed to be zero-based. Start
11069 * the search at the rpi_base as reported by the port.
11070 */
11071 spin_lock_irq(&phba->hbalock);
11072 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11073 if (rpi >= rpi_limit || rpi < rpi_base)
11074 rpi = LPFC_RPI_ALLOC_ERROR;
11075 else {
11076 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11077 phba->sli4_hba.max_cfg_param.rpi_used++;
11078 phba->sli4_hba.rpi_count++;
11079 }
11080
11081 /*
11082	 * Don't try to allocate more rpi header regions if the device's
11083	 * limit on available rpis has been exhausted.
11084 */
11085 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11086 (phba->sli4_hba.rpi_count >= max_rpi)) {
11087 spin_unlock_irq(&phba->hbalock);
11088 return rpi;
11089 }
11090
11091 /*
11092 * If the driver is running low on rpi resources, allocate another
11093	 * page now. Note that the next_rpi value is used because
11094	 * it represents how many are actually in use, whereas max_rpi is
11095	 * the maximum number supported by the device.
11096 */
11097 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11098 phba->sli4_hba.rpi_count;
11099 spin_unlock_irq(&phba->hbalock);
11100 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11101 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11102 if (!rpi_hdr) {
11103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11104 "2002 Error Could not grow rpi "
11105 "count\n");
11106 } else {
11107 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11108 }
11109 }
11110
11111 return rpi;
11112}
11113
11114/**
11115 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11116 * @phba: pointer to lpfc hba data structure.
11117 * @rpi: rpi to be released.
11118 * This routine is invoked to release an rpi to the pool of
11119 * available rpis maintained by the driver.
11120 **/
11121void
11122lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11123{
11124 spin_lock_irq(&phba->hbalock);
11125 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11126 phba->sli4_hba.rpi_count--;
11127 phba->sli4_hba.max_cfg_param.rpi_used--;
11128 spin_unlock_irq(&phba->hbalock);
11129}
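/*
 * Illustrative sketch only (hypothetical caller, not part of the driver):
 * an rpi is pulled from the port's range with lpfc_sli4_alloc_rpi() before
 * a remote port login is registered, and handed back with
 * lpfc_sli4_free_rpi() when the login is torn down or registration fails.
 */
static int example_rpi_usage(struct lpfc_hba *phba)
{
	int rpi;

	rpi = lpfc_sli4_alloc_rpi(phba);
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;	/* no rpis left in the device's range */

	/* ... register the login and use the rpi here ... */

	lpfc_sli4_free_rpi(phba, rpi);	/* return the rpi on teardown */
	return 0;
}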
11130
11131/**
11132 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11133 * @phba: pointer to lpfc hba data structure.
11134 *
11135 * This routine is invoked to free the bitmask memory region
11136 * used to track rpi allocations.
11137 **/
11138void
11139lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11140{
11141 kfree(phba->sli4_hba.rpi_bmask);
11142}
11143
11144/**
11145 * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
11146 * @ndlp: pointer to the lpfc nodelist data structure.
11147 *
11148 * This routine is invoked to issue a resume-rpi mailbox command to the
11149 * port for the rpi associated with @ndlp.
11150 **/
11151int
11152lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11153{
11154 LPFC_MBOXQ_t *mboxq;
11155 struct lpfc_hba *phba = ndlp->phba;
11156 int rc;
11157
11158 /* The port is notified of the header region via a mailbox command. */
11159 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11160 if (!mboxq)
11161 return -ENOMEM;
11162
11163 /* Post all rpi memory regions to the port. */
11164 lpfc_resume_rpi(mboxq, ndlp);
11165 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11166 if (rc == MBX_NOT_FINISHED) {
11167 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11168 "2010 Resume RPI Mailbox failed "
11169 "status %d, mbxStatus x%x\n", rc,
11170 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11171 mempool_free(mboxq, phba->mbox_mem_pool);
11172 return -EIO;
11173 }
11174 return 0;
11175}
11176
11177/**
11178 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11179 * @phba: pointer to lpfc hba data structure.
11180 * @vpi: vpi value to activate with the port.
11181 *
11182 * This routine is invoked to activate a vpi with the
11183 * port when the host intends to use vports with a
11184 * nonzero vpi.
11185 *
11186 * Returns:
11187 * 0 success
11188 * 	negative error value otherwise
11189 **/
11190int
11191lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11192{
11193 LPFC_MBOXQ_t *mboxq;
11194 int rc = 0;
11195 uint32_t mbox_tmo;
11196
11197 if (vpi == 0)
11198 return -EINVAL;
11199 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11200 if (!mboxq)
11201 return -ENOMEM;
11202 lpfc_init_vpi(mboxq, vpi);
11203 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11205 if (rc != MBX_TIMEOUT)
11206 mempool_free(mboxq, phba->mbox_mem_pool);
11207 if (rc != MBX_SUCCESS) {
11208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11209 "2022 INIT VPI Mailbox failed "
11210 "status %d, mbxStatus x%x\n", rc,
11211 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11212 rc = -EIO;
11213 }
11214 return rc;
11215}
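/*
 * Illustrative sketch only (hypothetical helper): in SLI-4 a nonzero vpi
 * must be activated with lpfc_sli4_init_vpi() before a new vport may use
 * it; on failure the caller is expected to give the vpi back to the pool,
 * as the vport-create path later in this patch does.
 */
static int example_activate_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
	int rc;

	rc = lpfc_sli4_init_vpi(phba, vpi);
	if (rc)
		return rc;	/* caller frees the vpi and aborts vport create */
	return 0;
}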
11216
11217/**
11218 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11219 * @phba: pointer to lpfc hba data structure.
11220 * @mboxq: Pointer to mailbox object.
11221 *
11222 * This routine is the completion handler for the nonembedded ADD_FCF
11223 * mailbox command. It checks the mailbox subheader status, logs any
11224 * failure other than STATUS_FCF_IN_USE, and frees the mailbox resources.
11225 **/
11226static void
11227lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11228{
11229 void *virt_addr;
11230 union lpfc_sli4_cfg_shdr *shdr;
11231 uint32_t shdr_status, shdr_add_status;
11232
11233 virt_addr = mboxq->sge_array->addr[0];
11234 /* The IOCTL status is embedded in the mailbox subheader. */
11235 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11238
11239 if ((shdr_status || shdr_add_status) &&
11240 (shdr_status != STATUS_FCF_IN_USE))
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "2558 ADD_FCF_RECORD mailbox failed with "
11243 "status x%x add_status x%x\n",
11244 shdr_status, shdr_add_status);
11245
11246 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11247}
11248
11249/**
11250 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11251 * @phba: pointer to lpfc hba data structure.
11252 * @fcf_record: pointer to the initialized fcf record to add.
11253 *
11254 * This routine is invoked to manually add a single FCF record. The caller
11255 * must pass a completely initialized FCF_Record. This routine takes
11256 * care of the nonembedded mailbox operations.
11257 **/
11258int
11259lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11260{
11261 int rc = 0;
11262 LPFC_MBOXQ_t *mboxq;
11263 uint8_t *bytep;
11264 void *virt_addr;
11265 dma_addr_t phys_addr;
11266 struct lpfc_mbx_sge sge;
11267 uint32_t alloc_len, req_len;
11268 uint32_t fcfindex;
11269
11270 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11271 if (!mboxq) {
11272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11273 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11274 return -ENOMEM;
11275 }
11276
11277 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11278 sizeof(uint32_t);
11279
11280 /* Allocate DMA memory and set up the non-embedded mailbox command */
11281 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11282 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11283 req_len, LPFC_SLI4_MBX_NEMBED);
11284 if (alloc_len < req_len) {
11285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11286 "2523 Allocated DMA memory size (x%x) is "
11287 "less than the requested DMA memory "
11288 "size (x%x)\n", alloc_len, req_len);
11289 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11290 return -ENOMEM;
11291 }
11292
11293 /*
11294 * Get the first SGE entry from the non-embedded DMA memory. This
11295 * routine only uses a single SGE.
11296 */
11297 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11298 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11299 if (unlikely(!mboxq->sge_array)) {
11300 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11301 "2526 Failed to get the non-embedded SGE "
11302 "virtual address\n");
11303 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11304 return -ENOMEM;
11305 }
11306 virt_addr = mboxq->sge_array->addr[0];
11307 /*
11308 * Configure the FCF record for FCFI 0. This is the driver's
11309 * hardcoded default and gets used in nonFIP mode.
11310 */
11311 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11312 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11313 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11314
11315 /*
11316 * Copy the fcf_index and the FCF Record Data. The data starts after
11317 * the FCoE header plus word10. The data copy needs to be endian
11318 * correct.
11319 */
11320 bytep += sizeof(uint32_t);
11321 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11322 mboxq->vport = phba->pport;
11323 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11324 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11325 if (rc == MBX_NOT_FINISHED) {
11326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11327 "2515 ADD_FCF_RECORD mailbox failed with "
11328 "status 0x%x\n", rc);
11329 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11330 rc = -EIO;
11331 } else
11332 rc = 0;
11333
11334 return rc;
11335}
11336
11337/**
11338 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11339 * @phba: pointer to lpfc hba data structure.
11340 * @fcf_record: pointer to the fcf record to write the default data.
11341 * @fcf_index: FCF table entry index.
11342 *
11343 * This routine is invoked to build the driver's default FCF record. The
11344 * values used are hardcoded. This routine handles memory initialization.
11345 *
11346 **/
11347void
11348lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11349 struct fcf_record *fcf_record,
11350 uint16_t fcf_index)
11351{
11352 memset(fcf_record, 0, sizeof(struct fcf_record));
11353 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11354 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11355 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11356 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11357 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11358 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11359 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11360 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11361 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11362 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11369 /* Set the VLAN bit map */
11370 if (phba->valid_vlan) {
11371 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11372 = 1 << (phba->vlan_id % 8);
11373 }
11374}
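/*
 * Illustrative sketch only (hypothetical helper): in non-FIP mode the
 * driver builds its hardcoded default FCF record and pushes it to the
 * port with the nonembedded ADD_FCF mailbox command.
 */
static int example_add_default_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record,
					LPFC_FCOE_FCF_DEF_INDEX);
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}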
11375
11376/**
11377 * lpfc_sli4_read_fcf_record - Read an FCF record from the port's FCF table.
11378 * @phba: pointer to lpfc hba data structure.
11379 * @fcf_index: FCF table entry offset.
11380 *
11381 * This routine is invoked to issue a nonembedded READ_FCF_TABLE mailbox
11382 * command that reads the FCF record at the given @fcf_index from the device.
11383 **/
11384int
11385lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11386{
11387 int rc = 0, error;
11388 LPFC_MBOXQ_t *mboxq;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 uint8_t *bytep;
11392 struct lpfc_mbx_sge sge;
11393 uint32_t alloc_len, req_len;
11394 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11395
11396 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11397 if (!mboxq) {
11398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11399 "2000 Failed to allocate mbox for "
11400 "READ_FCF cmd\n");
11401 return -ENOMEM;
11402 }
11403
11404 req_len = sizeof(struct fcf_record) +
11405 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11406
11407 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11408 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11409 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11410 LPFC_SLI4_MBX_NEMBED);
11411
11412 if (alloc_len < req_len) {
11413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11414 "0291 Allocated DMA memory size (x%x) is "
11415 "less than the requested DMA memory "
11416 "size (x%x)\n", alloc_len, req_len);
11417 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11418 return -ENOMEM;
11419 }
11420
11421 /* Get the first SGE entry from the non-embedded DMA memory. This
11422 * routine only uses a single SGE.
11423 */
11424 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11425 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11426 if (unlikely(!mboxq->sge_array)) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11428 "2527 Failed to get the non-embedded SGE "
11429 "virtual address\n");
11430 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11431 return -ENOMEM;
11432 }
11433 virt_addr = mboxq->sge_array->addr[0];
11434 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11435
11436 /* Set up command fields */
11437 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11438 /* Perform necessary endian conversion */
11439 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11440 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11441 mboxq->vport = phba->pport;
11442 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11444 if (rc == MBX_NOT_FINISHED) {
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 error = -EIO;
11447 } else
11448 error = 0;
11449 return error;
11450}
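/*
 * Illustrative sketch only (hypothetical helper): FCF discovery is kicked
 * off by reading the first record in the FCF table; the record is returned
 * asynchronously through lpfc_mbx_cmpl_read_fcf_record.
 */
static int example_start_fcf_scan(struct lpfc_hba *phba)
{
	return lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
}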
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6..7d37eb7459b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 75 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 76 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 77 struct lpfc_iocbq *);
68 78 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 79};
70 80
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 81#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 91typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 92 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 93 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 94 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 95 MAILBOX_t mb; /* Mailbox cmd */
96 struct lpfc_mqe mqe;
97 } u;
98 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 99 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 100 void *context2; /* caller context information */
88 101
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 102 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 103 uint8_t mbox_flag;
91 104 struct lpfc_mcqe mcqe;
105 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 106} LPFC_MBOXQ_t;
93 107
94#define MBX_POLL 1 /* poll mailbox till command done, then 108#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
230 244
231 /* Additional sli_flags */ 245 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 246#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 247#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 248#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 249#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 250#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
251#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 252
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 253 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 254 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
261 276
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 277#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 278 command */
279#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
280 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 281#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 282 * or erase cmds. This is especially
266 * long because of the potential of 283 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 00000000000..5196b46608d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
43 * when nonFIP mode is configured and there is no other default
44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC is specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
143#define FCF_IN_USE		0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e307..6b8a148f0a5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.2"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0af..a6313ee84ac 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 740 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 741 struct lpfc_vport **vports;
712 int index = 0; 742 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 743 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 744 GFP_KERNEL);
715 if (vports == NULL) 745 if (vports == NULL)
716 return NULL; 746 return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 764 int i;
735 if (vports == NULL) 765 if (vports == NULL)
736 return; 766 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 767 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 768 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 769 kfree(vports);
740} 770}
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 795201fa0b4..512c2cc1a33 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -469,7 +469,7 @@ typedef struct {
469 u8 type; /* Type of the device */ 469 u8 type; /* Type of the device */
470 u8 cur_status; /* current status of the device */ 470 u8 cur_status; /* current status of the device */
471 u8 tag_depth; /* Level of tagging */ 471 u8 tag_depth; /* Level of tagging */
472 u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ 472 u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */
473 u32 size; /* configurable size in terms of 512 byte 473 u32 size; /* configurable size in terms of 512 byte
474 blocks */ 474 blocks */
475}__attribute__ ((packed)) phys_drv; 475}__attribute__ ((packed)) phys_drv;
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 170399ef06f..b25b74764ec 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -686,7 +686,7 @@ typedef struct {
686 * @type : Type of the device 686 * @type : Type of the device
687 * @cur_status : current status of the device 687 * @cur_status : current status of the device
688 * @tag_depth : Level of tagging 688 * @tag_depth : Level of tagging
689 * @sync_neg : sync negotiation - ENABLE or DISBALE 689 * @sync_neg : sync negotiation - ENABLE or DISABLE
690 * @size : configurable size in terms of 512 byte 690 * @size : configurable size in terms of 512 byte
691 */ 691 */
692typedef struct { 692typedef struct {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index babd4cc0cb2..286c185fa9e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
61#include <scsi/scsi_tcq.h> 61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h> 62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h> 63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
64 65
65#include "mpt2sas_debug.h" 66#include "mpt2sas_debug.h"
66 67
@@ -68,10 +69,10 @@
68#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "01.100.02.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
72#define MPT2SAS_MAJOR_VERSION 00 73#define MPT2SAS_MAJOR_VERSION 01
73#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 02 75#define MPT2SAS_BUILD_VERSION 03
75#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
76 77
77/* 78/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf..14e473d1fa7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
473} 473}
474 474
475/** 475/**
476 * _ctl_do_task_abort - assign an active smid to the abort_task 476 * _ctl_set_task_mid - assign an active smid to tm request
477 * @ioc: per adapter object 477 * @ioc: per adapter object
478 * @karg - (struct mpt2_ioctl_command) 478 * @karg - (struct mpt2_ioctl_command)
479 * @tm_request - pointer to mf from user space 479 * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
482 * during failure, the reply frame is filled. 482 * during failure, the reply frame is filled.
483 */ 483 */
484static int 484static int
485_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, 485_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
486 Mpi2SCSITaskManagementRequest_t *tm_request) 486 Mpi2SCSITaskManagementRequest_t *tm_request)
487{ 487{
488 u8 found = 0; 488 u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
494 Mpi2SCSITaskManagementReply_t *tm_reply; 494 Mpi2SCSITaskManagementReply_t *tm_reply;
495 u32 sz; 495 u32 sz;
496 u32 lun; 496 u32 lun;
497 char *desc = NULL;
498
499 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
500 desc = "abort_task";
501 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
502 desc = "query_task";
503 else
504 return 0;
497 505
498 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 506 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
499 507
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
517 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 525 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
518 526
519 if (!found) { 527 if (!found) {
520 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 528 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
521 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 529 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
522 tm_request->DevHandle, lun)); 530 desc, tm_request->DevHandle, lun));
523 tm_reply = ioc->ctl_cmds.reply; 531 tm_reply = ioc->ctl_cmds.reply;
524 tm_reply->DevHandle = tm_request->DevHandle; 532 tm_reply->DevHandle = tm_request->DevHandle;
525 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 533 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 534 tm_reply->TaskType = tm_request->TaskType;
527 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 535 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
528 tm_reply->VP_ID = tm_request->VP_ID; 536 tm_reply->VP_ID = tm_request->VP_ID;
529 tm_reply->VF_ID = tm_request->VF_ID; 537 tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
535 return 1; 543 return 1;
536 } 544 }
537 545
538 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 546 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
539 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, 547 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
540 tm_request->DevHandle, lun, tm_request->TaskMID)); 548 desc, tm_request->DevHandle, lun, tm_request->TaskMID));
541 return 0; 549 return 0;
542} 550}
543 551
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 747 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
740 748
741 if (tm_request->TaskType == 749 if (tm_request->TaskType ==
742 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 750 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
743 if (_ctl_do_task_abort(ioc, &karg, tm_request)) { 751 tm_request->TaskType ==
752 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
753 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
744 mpt2sas_base_free_smid(ioc, smid); 754 mpt2sas_base_free_smid(ioc, smid);
745 goto out; 755 goto out;
746 } 756 }
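
The mpt2sas_ctl.c hunks above rename _ctl_do_task_abort() to _ctl_set_task_mid() and let it service QUERY_TASK in addition to ABORT_TASK, deriving a printable description from the task-management type before looking up an active TaskMID. A minimal sketch of that dispatch, for illustration only (tm_type_desc() is a hypothetical helper, not a driver symbol):

	/* Sketch: mirrors the TaskType dispatch added in _ctl_set_task_mid(). */
	static const char *tm_type_desc(u8 task_type)
	{
		switch (task_type) {
		case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
			return "abort_task";
		case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
			return "query_task";
		default:
			return NULL;	/* other TM types carry no TaskMID to resolve */
		}
	}
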
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e..2a01a5f2a84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
197MODULE_DEVICE_TABLE(pci, scsih_pci_table); 197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198 198
199/** 199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level. 200 * _scsih_set_debug_level - global setting of ioc->logging_level.
201 * 201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h. 202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */ 203 */
204static int 204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp) 205_scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{ 206{
207 int ret = param_set_int(val, kp); 207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc; 208 struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
215 ioc->logging_level = logging_level; 215 ioc->logging_level = logging_level;
216 return 0; 216 return 0;
217} 217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int, 218module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
219 &logging_level, 0644); 219 &logging_level, 0644);
220 220
221/** 221/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
884} 884}
885 885
886/** 886/**
887 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
888 * @ioc: per adapter object
889 * @id: target id
890 * @lun: lun number
891 * @channel: channel
892 * Context: This function will acquire ioc->scsi_lookup_lock.
893 *
894 * This will search for a matching channel:id:lun in the scsi_lookup array,
895 * returning 1 if found.
896 */
897static u8
898_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
899 unsigned int lun, int channel)
900{
901 u8 found;
902 unsigned long flags;
903 int i;
904
905 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
906 found = 0;
907 for (i = 0 ; i < ioc->request_depth; i++) {
908 if (ioc->scsi_lookup[i].scmd &&
909 (ioc->scsi_lookup[i].scmd->device->id == id &&
910 ioc->scsi_lookup[i].scmd->device->channel == channel &&
911 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
912 found = 1;
913 goto out;
914 }
915 }
916 out:
917 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
918 return found;
919}
920
921/**
887 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 922 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
888 * @ioc: per adapter object 923 * @ioc: per adapter object
889 * @smid: system request message index 924 * @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1047} 1082}
1048 1083
1049/** 1084/**
1050 * scsih_change_queue_depth - setting device queue depth 1085 * _scsih_change_queue_depth - setting device queue depth
1051 * @sdev: scsi device struct 1086 * @sdev: scsi device struct
1052 * @qdepth: requested queue depth 1087 * @qdepth: requested queue depth
1053 * 1088 *
1054 * Returns queue depth. 1089 * Returns queue depth.
1055 */ 1090 */
1056static int 1091static int
1057scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1092_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1058{ 1093{
1059 struct Scsi_Host *shost = sdev->host; 1094 struct Scsi_Host *shost = sdev->host;
1060 int max_depth; 1095 int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1079} 1114}
1080 1115
1081/** 1116/**
1082 * scsih_change_queue_depth - changing device queue tag type 1117 * _scsih_change_queue_depth - changing device queue tag type
1083 * @sdev: scsi device struct 1118 * @sdev: scsi device struct
1084 * @tag_type: requested tag type 1119 * @tag_type: requested tag type
1085 * 1120 *
1086 * Returns queue tag type. 1121 * Returns queue tag type.
1087 */ 1122 */
1088static int 1123static int
1089scsih_change_queue_type(struct scsi_device *sdev, int tag_type) 1124_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1090{ 1125{
1091 if (sdev->tagged_supported) { 1126 if (sdev->tagged_supported) {
1092 scsi_set_tag_type(sdev, tag_type); 1127 scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1101} 1136}
1102 1137
1103/** 1138/**
1104 * scsih_target_alloc - target add routine 1139 * _scsih_target_alloc - target add routine
1105 * @starget: scsi target struct 1140 * @starget: scsi target struct
1106 * 1141 *
1107 * Returns 0 if ok. Any other return is assumed to be an error and 1142 * Returns 0 if ok. Any other return is assumed to be an error and
1108 * the device is ignored. 1143 * the device is ignored.
1109 */ 1144 */
1110static int 1145static int
1111scsih_target_alloc(struct scsi_target *starget) 1146_scsih_target_alloc(struct scsi_target *starget)
1112{ 1147{
1113 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1148 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1114 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1149 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
1163} 1198}
1164 1199
1165/** 1200/**
1166 * scsih_target_destroy - target destroy routine 1201 * _scsih_target_destroy - target destroy routine
1167 * @starget: scsi target struct 1202 * @starget: scsi target struct
1168 * 1203 *
1169 * Returns nothing. 1204 * Returns nothing.
1170 */ 1205 */
1171static void 1206static void
1172scsih_target_destroy(struct scsi_target *starget) 1207_scsih_target_destroy(struct scsi_target *starget)
1173{ 1208{
1174 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1175 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1210 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
1212} 1247}
1213 1248
1214/** 1249/**
1215 * scsih_slave_alloc - device add routine 1250 * _scsih_slave_alloc - device add routine
1216 * @sdev: scsi device struct 1251 * @sdev: scsi device struct
1217 * 1252 *
1218 * Returns 0 if ok. Any other return is assumed to be an error and 1253 * Returns 0 if ok. Any other return is assumed to be an error and
1219 * the device is ignored. 1254 * the device is ignored.
1220 */ 1255 */
1221static int 1256static int
1222scsih_slave_alloc(struct scsi_device *sdev) 1257_scsih_slave_alloc(struct scsi_device *sdev)
1223{ 1258{
1224 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1225 struct MPT2SAS_ADAPTER *ioc; 1260 struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
1273} 1308}
1274 1309
1275/** 1310/**
1276 * scsih_slave_destroy - device destroy routine 1311 * _scsih_slave_destroy - device destroy routine
1277 * @sdev: scsi device struct 1312 * @sdev: scsi device struct
1278 * 1313 *
1279 * Returns nothing. 1314 * Returns nothing.
1280 */ 1315 */
1281static void 1316static void
1282scsih_slave_destroy(struct scsi_device *sdev) 1317_scsih_slave_destroy(struct scsi_device *sdev)
1283{ 1318{
1284 struct MPT2SAS_TARGET *sas_target_priv_data; 1319 struct MPT2SAS_TARGET *sas_target_priv_data;
1285 struct scsi_target *starget; 1320 struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
1295} 1330}
1296 1331
1297/** 1332/**
1298 * scsih_display_sata_capabilities - sata capabilities 1333 * _scsih_display_sata_capabilities - sata capabilities
1299 * @ioc: per adapter object 1334 * @ioc: per adapter object
1300 * @sas_device: the sas_device object 1335 * @sas_device: the sas_device object
1301 * @sdev: scsi device struct 1336 * @sdev: scsi device struct
1302 */ 1337 */
1303static void 1338static void
1304scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1339_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1305 struct _sas_device *sas_device, struct scsi_device *sdev) 1340 struct _sas_device *sas_device, struct scsi_device *sdev)
1306{ 1341{
1307 Mpi2ConfigReply_t mpi_reply; 1342 Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1401} 1436}
1402 1437
1403/** 1438/**
1404 * scsih_slave_configure - device configure routine. 1439 * _scsih_slave_configure - device configure routine.
1405 * @sdev: scsi device struct 1440 * @sdev: scsi device struct
1406 * 1441 *
1407 * Returns 0 if ok. Any other return is assumed to be an error and 1442 * Returns 0 if ok. Any other return is assumed to be an error and
1408 * the device is ignored. 1443 * the device is ignored.
1409 */ 1444 */
1410static int 1445static int
1411scsih_slave_configure(struct scsi_device *sdev) 1446_scsih_slave_configure(struct scsi_device *sdev)
1412{ 1447{
1413 struct Scsi_Host *shost = sdev->host; 1448 struct Scsi_Host *shost = sdev->host;
1414 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1449 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1489 r_level, raid_device->handle, 1524 r_level, raid_device->handle,
1490 (unsigned long long)raid_device->wwid, 1525 (unsigned long long)raid_device->wwid,
1491 raid_device->num_pds, ds); 1526 raid_device->num_pds, ds);
1492 scsih_change_queue_depth(sdev, qdepth); 1527 _scsih_change_queue_depth(sdev, qdepth);
1493 return 0; 1528 return 0;
1494 } 1529 }
1495 1530
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
1532 sas_device->slot); 1567 sas_device->slot);
1533 1568
1534 if (!ssp_target) 1569 if (!ssp_target)
1535 scsih_display_sata_capabilities(ioc, sas_device, sdev); 1570 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1536 } 1571 }
1537 1572
1538 scsih_change_queue_depth(sdev, qdepth); 1573 _scsih_change_queue_depth(sdev, qdepth);
1539 1574
1540 if (ssp_target) 1575 if (ssp_target)
1541 sas_read_port_mode_page(sdev); 1576 sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1543} 1578}
1544 1579
1545/** 1580/**
1546 * scsih_bios_param - fetch head, sector, cylinder info for a disk 1581 * _scsih_bios_param - fetch head, sector, cylinder info for a disk
1547 * @sdev: scsi device struct 1582 * @sdev: scsi device struct
1548 * @bdev: pointer to block device context 1583 * @bdev: pointer to block device context
1549 * @capacity: device size (in 512 byte sectors) 1584 * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1555 * Return nothing. 1590 * Return nothing.
1556 */ 1591 */
1557static int 1592static int
1558scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 1593_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1559 sector_t capacity, int params[]) 1594 sector_t capacity, int params[])
1560{ 1595{
1561 int heads; 1596 int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1636} 1671}
1637 1672
1638/** 1673/**
1639 * scsih_tm_done - tm completion routine 1674 * _scsih_tm_done - tm completion routine
1640 * @ioc: per adapter object 1675 * @ioc: per adapter object
1641 * @smid: system request message index 1676 * @smid: system request message index
1642 * @VF_ID: virtual function id 1677 * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1648 * Return nothing. 1683 * Return nothing.
1649 */ 1684 */
1650static void 1685static void
1651scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1686_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1652{ 1687{
1653 MPI2DefaultReply_t *mpi_reply; 1688 MPI2DefaultReply_t *mpi_reply;
1654 1689
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1823} 1858}
1824 1859
1825/** 1860/**
1826 * scsih_abort - eh threads main abort routine 1861 * _scsih_abort - eh threads main abort routine
1827 * @sdev: scsi device struct 1862 * @sdev: scsi device struct
1828 * 1863 *
1829 * Returns SUCCESS if command aborted else FAILED 1864 * Returns SUCCESS if command aborted else FAILED
1830 */ 1865 */
1831static int 1866static int
1832scsih_abort(struct scsi_cmnd *scmd) 1867_scsih_abort(struct scsi_cmnd *scmd)
1833{ 1868{
1834 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 1869 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1835 struct MPT2SAS_DEVICE *sas_device_priv_data; 1870 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
1889 return r; 1924 return r;
1890} 1925}
1891 1926
1927/**
1928 * _scsih_dev_reset - eh threads main device reset routine
 1929 * @scmd: pointer to scsi command object
1930 *
1931 * Returns SUCCESS if command aborted else FAILED
1932 */
1933static int
1934_scsih_dev_reset(struct scsi_cmnd *scmd)
1935{
1936 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1937 struct MPT2SAS_DEVICE *sas_device_priv_data;
1938 struct _sas_device *sas_device;
1939 unsigned long flags;
1940 u16 handle;
1941 int r;
1942
1943 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
1944 ioc->name, scmd);
1945 scsi_print_command(scmd);
1946
1947 sas_device_priv_data = scmd->device->hostdata;
1948 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1949 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
1950 ioc->name, scmd);
1951 scmd->result = DID_NO_CONNECT << 16;
1952 scmd->scsi_done(scmd);
1953 r = SUCCESS;
1954 goto out;
1955 }
1956
1957 /* for hidden raid components obtain the volume_handle */
1958 handle = 0;
1959 if (sas_device_priv_data->sas_target->flags &
1960 MPT_TARGET_FLAGS_RAID_COMPONENT) {
1961 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1962 sas_device = _scsih_sas_device_find_by_handle(ioc,
1963 sas_device_priv_data->sas_target->handle);
1964 if (sas_device)
1965 handle = sas_device->volume_handle;
1966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1967 } else
1968 handle = sas_device_priv_data->sas_target->handle;
1969
1970 if (!handle) {
1971 scmd->result = DID_RESET << 16;
1972 r = FAILED;
1973 goto out;
1974 }
1975
1976 mutex_lock(&ioc->tm_cmds.mutex);
1977 mpt2sas_scsih_issue_tm(ioc, handle, 0,
1978 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
1979 30);
1980
1981 /*
 1982 * sanity check: see whether all commands to this device have
 1983 * been completed
1984 */
1985 if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
1986 scmd->device->lun, scmd->device->channel))
1987 r = FAILED;
1988 else
1989 r = SUCCESS;
1990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
1991 mutex_unlock(&ioc->tm_cmds.mutex);
1992
1993 out:
1994 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
1995 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1996 return r;
1997}
1892 1998
1893/** 1999/**
1894 * scsih_dev_reset - eh threads main device reset routine 2000 * _scsih_target_reset - eh threads main target reset routine
1895 * @sdev: scsi device struct 2001 * @sdev: scsi device struct
1896 * 2002 *
1897 * Returns SUCCESS if command aborted else FAILED 2003 * Returns SUCCESS if command aborted else FAILED
1898 */ 2004 */
1899static int 2005static int
1900scsih_dev_reset(struct scsi_cmnd *scmd) 2006_scsih_target_reset(struct scsi_cmnd *scmd)
1901{ 2007{
1902 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2008 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1903 struct MPT2SAS_DEVICE *sas_device_priv_data; 2009 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1912 2018
1913 sas_device_priv_data = scmd->device->hostdata; 2019 sas_device_priv_data = scmd->device->hostdata;
1914 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2020 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1915 printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", 2021 printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
1916 ioc->name, scmd); 2022 ioc->name, scmd);
1917 scmd->result = DID_NO_CONNECT << 16; 2023 scmd->result = DID_NO_CONNECT << 16;
1918 scmd->scsi_done(scmd); 2024 scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1962} 2068}
1963 2069
1964/** 2070/**
1965 * scsih_abort - eh threads main host reset routine 2071 * _scsih_abort - eh threads main host reset routine
1966 * @sdev: scsi device struct 2072 * @sdev: scsi device struct
1967 * 2073 *
1968 * Returns SUCCESS if command aborted else FAILED 2074 * Returns SUCCESS if command aborted else FAILED
1969 */ 2075 */
1970static int 2076static int
1971scsih_host_reset(struct scsi_cmnd *scmd) 2077_scsih_host_reset(struct scsi_cmnd *scmd)
1972{ 2078{
1973 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2079 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1974 int r, retval; 2080 int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2390} 2496}
2391 2497
2392/** 2498/**
2393 * scsih_qcmd - main scsi request entry point 2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object
 2501 * @mpi_request: pointer to the SCSI_IO request message frame
2502 *
2503 * Supporting protection 1 and 3.
2504 *
2505 * Returns nothing
2506 */
2507static void
2508_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2509{
2510 u16 eedp_flags;
2511 unsigned char prot_op = scsi_get_prot_op(scmd);
2512 unsigned char prot_type = scsi_get_prot_type(scmd);
2513
2514 if (prot_type == SCSI_PROT_DIF_TYPE0 ||
2515 prot_type == SCSI_PROT_DIF_TYPE2 ||
2516 prot_op == SCSI_PROT_NORMAL)
2517 return;
2518
2519 if (prot_op == SCSI_PROT_READ_STRIP)
2520 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
2521 else if (prot_op == SCSI_PROT_WRITE_INSERT)
2522 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2523 else
2524 return;
2525
2526 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2527
2528 switch (prot_type) {
2529 case SCSI_PROT_DIF_TYPE1:
2530
2531 /*
2532 * enable ref/guard checking
2533 * auto increment ref tag
2534 */
2535 mpi_request->EEDPFlags = eedp_flags |
2536 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2537 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2538 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2539 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2540 cpu_to_be32(scsi_get_lba(scmd));
2541
2542 break;
2543
2544 case SCSI_PROT_DIF_TYPE3:
2545
2546 /*
2547 * enable guard checking
2548 */
2549 mpi_request->EEDPFlags = eedp_flags |
2550 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2551
2552 break;
2553 }
2554}
2555
2556/**
2557 * _scsih_eedp_error_handling - return sense code for EEDP errors
2558 * @scmd: pointer to scsi command object
2559 * @ioc_status: ioc status
2560 *
2561 * Returns nothing
2562 */
2563static void
2564_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
2565{
2566 u8 ascq;
2567 u8 sk;
2568 u8 host_byte;
2569
2570 switch (ioc_status) {
2571 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2572 ascq = 0x01;
2573 break;
2574 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2575 ascq = 0x02;
2576 break;
2577 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2578 ascq = 0x03;
2579 break;
2580 default:
2581 ascq = 0x00;
2582 break;
2583 }
2584
2585 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2586 sk = ILLEGAL_REQUEST;
2587 host_byte = DID_ABORT;
2588 } else {
2589 sk = ABORTED_COMMAND;
2590 host_byte = DID_OK;
2591 }
2592
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
2594 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
2595 SAM_STAT_CHECK_CONDITION;
2596}
2597
2598/**
2599 * _scsih_qcmd - main scsi request entry point
2394 * @scmd: pointer to scsi command object 2600 * @scmd: pointer to scsi command object
2395 * @done: function pointer to be invoked on completion 2601 * @done: function pointer to be invoked on completion
2396 * 2602 *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2401 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 2607 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2402 */ 2608 */
2403static int 2609static int
2404scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 2610_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2405{ 2611{
2406 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2407 struct MPT2SAS_DEVICE *sas_device_priv_data; 2613 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2470 } 2676 }
2471 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2677 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2472 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 2678 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2679 _scsih_setup_eedp(scmd, mpi_request);
2473 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2680 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2474 if (sas_device_priv_data->sas_target->flags & 2681 if (sas_device_priv_data->sas_target->flags &
2475 MPT_TARGET_FLAGS_RAID_COMPONENT) 2682 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2604 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2811 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2605 desc_ioc_state = "scsi ext terminated"; 2812 desc_ioc_state = "scsi ext terminated";
2606 break; 2813 break;
2814 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2815 desc_ioc_state = "eedp guard error";
2816 break;
2817 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2818 desc_ioc_state = "eedp ref tag error";
2819 break;
2820 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2821 desc_ioc_state = "eedp app tag error";
2822 break;
2607 default: 2823 default:
2608 desc_ioc_state = "unknown"; 2824 desc_ioc_state = "unknown";
2609 break; 2825 break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2783} 2999}
2784 3000
2785/** 3001/**
2786 * scsih_io_done - scsi request callback 3002 * _scsih_io_done - scsi request callback
2787 * @ioc: per adapter object 3003 * @ioc: per adapter object
2788 * @smid: system request message index 3004 * @smid: system request message index
2789 * @VF_ID: virtual function id 3005 * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2794 * Return nothing. 3010 * Return nothing.
2795 */ 3011 */
2796static void 3012static void
2797scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3013_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2798{ 3014{
2799 Mpi2SCSIIORequest_t *mpi_request; 3015 Mpi2SCSIIORequest_t *mpi_request;
2800 Mpi2SCSIIOReply_t *mpi_reply; 3016 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2939 scmd->result = DID_RESET << 16; 3155 scmd->result = DID_RESET << 16;
2940 break; 3156 break;
2941 3157
3158 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
3159 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
3160 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
3161 _scsih_eedp_error_handling(scmd, ioc_status);
3162 break;
2942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3163 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2943 case MPI2_IOCSTATUS_INVALID_FUNCTION: 3164 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2944 case MPI2_IOCSTATUS_INVALID_SGL: 3165 case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
5130 .module = THIS_MODULE, 5351 .module = THIS_MODULE,
5131 .name = "Fusion MPT SAS Host", 5352 .name = "Fusion MPT SAS Host",
5132 .proc_name = MPT2SAS_DRIVER_NAME, 5353 .proc_name = MPT2SAS_DRIVER_NAME,
5133 .queuecommand = scsih_qcmd, 5354 .queuecommand = _scsih_qcmd,
5134 .target_alloc = scsih_target_alloc, 5355 .target_alloc = _scsih_target_alloc,
5135 .slave_alloc = scsih_slave_alloc, 5356 .slave_alloc = _scsih_slave_alloc,
5136 .slave_configure = scsih_slave_configure, 5357 .slave_configure = _scsih_slave_configure,
5137 .target_destroy = scsih_target_destroy, 5358 .target_destroy = _scsih_target_destroy,
5138 .slave_destroy = scsih_slave_destroy, 5359 .slave_destroy = _scsih_slave_destroy,
5139 .change_queue_depth = scsih_change_queue_depth, 5360 .change_queue_depth = _scsih_change_queue_depth,
5140 .change_queue_type = scsih_change_queue_type, 5361 .change_queue_type = _scsih_change_queue_type,
5141 .eh_abort_handler = scsih_abort, 5362 .eh_abort_handler = _scsih_abort,
5142 .eh_device_reset_handler = scsih_dev_reset, 5363 .eh_device_reset_handler = _scsih_dev_reset,
5143 .eh_host_reset_handler = scsih_host_reset, 5364 .eh_target_reset_handler = _scsih_target_reset,
5144 .bios_param = scsih_bios_param, 5365 .eh_host_reset_handler = _scsih_host_reset,
5366 .bios_param = _scsih_bios_param,
5145 .can_queue = 1, 5367 .can_queue = 1,
5146 .this_id = -1, 5368 .this_id = -1,
5147 .sg_tablesize = MPT2SAS_SG_DEPTH, 5369 .sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5228} 5450}
5229 5451
5230/** 5452/**
5231 * scsih_remove - detach and remove add host 5453 * _scsih_remove - detach and remove add host
5232 * @pdev: PCI device struct 5454 * @pdev: PCI device struct
5233 * 5455 *
5234 * Return nothing. 5456 * Return nothing.
5235 */ 5457 */
5236static void __devexit 5458static void __devexit
5237scsih_remove(struct pci_dev *pdev) 5459_scsih_remove(struct pci_dev *pdev)
5238{ 5460{
5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5461 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5240 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5462 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5442} 5664}
5443 5665
5444/** 5666/**
5445 * scsih_probe - attach and add scsi host 5667 * _scsih_probe - attach and add scsi host
5446 * @pdev: PCI device struct 5668 * @pdev: PCI device struct
5447 * @id: pci device id 5669 * @id: pci device id
5448 * 5670 *
5449 * Returns 0 success, anything else error. 5671 * Returns 0 success, anything else error.
5450 */ 5672 */
5451static int 5673static int
5452scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5674_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5453{ 5675{
5454 struct MPT2SAS_ADAPTER *ioc; 5676 struct MPT2SAS_ADAPTER *ioc;
5455 struct Scsi_Host *shost; 5677 struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5503 goto out_add_shost_fail; 5725 goto out_add_shost_fail;
5504 } 5726 }
5505 5727
5728 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5729 | SHOST_DIF_TYPE3_PROTECTION);
5730
5506 /* event thread */ 5731 /* event thread */
5507 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5732 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5508 "fw_event%d", ioc->id); 5733 "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5536 5761
5537#ifdef CONFIG_PM 5762#ifdef CONFIG_PM
5538/** 5763/**
5539 * scsih_suspend - power management suspend main entry point 5764 * _scsih_suspend - power management suspend main entry point
5540 * @pdev: PCI device struct 5765 * @pdev: PCI device struct
5541 * @state: PM state change to (usually PCI_D3) 5766 * @state: PM state change to (usually PCI_D3)
5542 * 5767 *
5543 * Returns 0 success, anything else error. 5768 * Returns 0 success, anything else error.
5544 */ 5769 */
5545static int 5770static int
5546scsih_suspend(struct pci_dev *pdev, pm_message_t state) 5771_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5547{ 5772{
5548 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5549 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5564} 5789}
5565 5790
5566/** 5791/**
5567 * scsih_resume - power management resume main entry point 5792 * _scsih_resume - power management resume main entry point
5568 * @pdev: PCI device struct 5793 * @pdev: PCI device struct
5569 * 5794 *
5570 * Returns 0 success, anything else error. 5795 * Returns 0 success, anything else error.
5571 */ 5796 */
5572static int 5797static int
5573scsih_resume(struct pci_dev *pdev) 5798_scsih_resume(struct pci_dev *pdev)
5574{ 5799{
5575 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5800 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5576 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5801 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
5599static struct pci_driver scsih_driver = { 5824static struct pci_driver scsih_driver = {
5600 .name = MPT2SAS_DRIVER_NAME, 5825 .name = MPT2SAS_DRIVER_NAME,
5601 .id_table = scsih_pci_table, 5826 .id_table = scsih_pci_table,
5602 .probe = scsih_probe, 5827 .probe = _scsih_probe,
5603 .remove = __devexit_p(scsih_remove), 5828 .remove = __devexit_p(_scsih_remove),
5604#ifdef CONFIG_PM 5829#ifdef CONFIG_PM
5605 .suspend = scsih_suspend, 5830 .suspend = _scsih_suspend,
5606 .resume = scsih_resume, 5831 .resume = _scsih_resume,
5607#endif 5832#endif
5608}; 5833};
5609 5834
5610 5835
5611/** 5836/**
5612 * scsih_init - main entry point for this driver. 5837 * _scsih_init - main entry point for this driver.
5613 * 5838 *
5614 * Returns 0 success, anything else error. 5839 * Returns 0 success, anything else error.
5615 */ 5840 */
5616static int __init 5841static int __init
5617scsih_init(void) 5842_scsih_init(void)
5618{ 5843{
5619 int error; 5844 int error;
5620 5845
@@ -5630,10 +5855,10 @@ scsih_init(void)
5630 mpt2sas_base_initialize_callback_handler(); 5855 mpt2sas_base_initialize_callback_handler();
5631 5856
5632 /* queuecommand callback hander */ 5857 /* queuecommand callback hander */
5633 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); 5858 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
5634 5859
5635 /* task managment callback handler */ 5860 /* task managment callback handler */
5636 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); 5861 tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
5637 5862
5638 /* base internal commands callback handler */ 5863 /* base internal commands callback handler */
5639 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); 5864 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
5659} 5884}
5660 5885
5661/** 5886/**
5662 * scsih_exit - exit point for this driver (when it is a module). 5887 * _scsih_exit - exit point for this driver (when it is a module).
5663 * 5888 *
5664 * Returns 0 success, anything else error. 5889 * Returns 0 success, anything else error.
5665 */ 5890 */
5666static void __exit 5891static void __exit
5667scsih_exit(void) 5892_scsih_exit(void)
5668{ 5893{
5669 printk(KERN_INFO "mpt2sas version %s unloading\n", 5894 printk(KERN_INFO "mpt2sas version %s unloading\n",
5670 MPT2SAS_DRIVER_VERSION); 5895 MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
5682 mpt2sas_ctl_exit(); 5907 mpt2sas_ctl_exit();
5683} 5908}
5684 5909
5685module_init(scsih_init); 5910module_init(_scsih_init);
5686module_exit(scsih_exit); 5911module_exit(_scsih_exit);
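
Taken together, the mpt2sas_scsih.c hunks adopt the _scsih_ prefix for static functions, add a LUN reset handler alongside the target reset handler, and wire end-to-end data protection (EEDP, DIF types 1 and 3) into the I/O path: the host advertises support at probe time, _scsih_setup_eedp() converts the midlayer protection op/type into MPI2 EEDP flags per command, and guard/ref/app tag failures are mapped back to CHECK CONDITION sense data. A condensed sketch of the flag selection, using only constants visible in the hunks above (the helper name is illustrative, not a driver symbol):

	/* Sketch: pick MPI2 EEDP check flags for a protected command,
	 * condensing the switch in _scsih_setup_eedp() above. */
	static u32 eedp_check_flags(struct scsi_cmnd *scmd)
	{
		switch (scsi_get_prot_type(scmd)) {
		case SCSI_PROT_DIF_TYPE1:	/* check guard + ref tag, auto-inc ref tag */
			return MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			       MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			       MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		case SCSI_PROT_DIF_TYPE3:	/* check guard only */
			return MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		default:			/* types 0 and 2 are not handled here */
			return 0;
		}
	}

On the probe side, the last scsih hunk only needs scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE3_PROTECTION) for the midlayer to start issuing protected commands that reach this path.
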
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a..686695b155c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
264}; 264};
265 265
266/** 266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture 267 * _transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object 268 * @ioc: per adapter object
269 * @sas_address: expander sas address 269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object 270 * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
274 * Returns 0 for success, non-zero for failure. 274 * Returns 0 for success, non-zero for failure.
275 */ 275 */
276static int 276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, 277_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev) 278 u64 sas_address, struct sas_expander_device *edev)
279{ 279{
280 Mpi2SmpPassthroughRequest_t *mpi_request; 280 Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type == 579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) 580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc, 581 _transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address, 582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy)); 583 rphy_to_expander_device(rphy));
584 584
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
852} 852}
853 853
854/** 854/**
855 * transport_get_linkerrors - 855 * _transport_get_linkerrors -
856 * @phy: The sas phy object 856 * @phy: The sas phy object
857 * 857 *
858 * Only support sas_host direct attached phys. 858 * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
860 * 860 *
861 */ 861 */
862static int 862static int
863transport_get_linkerrors(struct sas_phy *phy) 863_transport_get_linkerrors(struct sas_phy *phy)
864{ 864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy; 866 struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
903} 903}
904 904
905/** 905/**
906 * transport_get_enclosure_identifier - 906 * _transport_get_enclosure_identifier -
907 * @phy: The sas phy object 907 * @phy: The sas phy object
908 * 908 *
909 * Obtain the enclosure logical id for an expander. 909 * Obtain the enclosure logical id for an expander.
910 * Returns 0 for success, non-zero for failure. 910 * Returns 0 for success, non-zero for failure.
911 */ 911 */
912static int 912static int
913transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 913_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
914{ 914{
915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
916 struct _sas_node *sas_expander; 916 struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
929} 929}
930 930
931/** 931/**
932 * transport_get_bay_identifier - 932 * _transport_get_bay_identifier -
933 * @phy: The sas phy object 933 * @phy: The sas phy object
934 * 934 *
935 * Returns the slot id for a device that resides inside an enclosure. 935 * Returns the slot id for a device that resides inside an enclosure.
936 */ 936 */
937static int 937static int
938transport_get_bay_identifier(struct sas_rphy *rphy) 938_transport_get_bay_identifier(struct sas_rphy *rphy)
939{ 939{
940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
941 struct _sas_device *sas_device; 941 struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
953} 953}
954 954
955/** 955/**
956 * transport_phy_reset - 956 * _transport_phy_reset -
957 * @phy: The sas phy object 957 * @phy: The sas phy object
958 * @hard_reset: 958 * @hard_reset:
959 * 959 *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
961 * Returns 0 for success, non-zero for failure. 961 * Returns 0 for success, non-zero for failure.
962 */ 962 */
963static int 963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset) 964_transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{ 965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy; 967 struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1002} 1002}
1003 1003
1004/** 1004/**
1005 * transport_smp_handler - transport portal for smp passthru 1005 * _transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object 1006 * @shost: shost object
1007 * @rphy: sas transport rphy object 1007 * @rphy: sas transport rphy object
1008 * @req: 1008 * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1012 * smp_rep_general /sys/class/bsg/expander-5:0 1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */ 1013 */
1014static int 1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, 1015_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req) 1016 struct request *req)
1017{ 1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
@@ -1201,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1201} 1200}
1202 1201
1203struct sas_function_template mpt2sas_transport_functions = { 1202struct sas_function_template mpt2sas_transport_functions = {
1204 .get_linkerrors = transport_get_linkerrors, 1203 .get_linkerrors = _transport_get_linkerrors,
1205 .get_enclosure_identifier = transport_get_enclosure_identifier, 1204 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1206 .get_bay_identifier = transport_get_bay_identifier, 1205 .get_bay_identifier = _transport_get_bay_identifier,
1207 .phy_reset = transport_phy_reset, 1206 .phy_reset = _transport_phy_reset,
1208 .smp_handler = transport_smp_handler, 1207 .smp_handler = _transport_smp_handler,
1209}; 1208};
1210 1209
1211struct scsi_transport_template *mpt2sas_transport_template; 1210struct scsi_transport_template *mpt2sas_transport_template;
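
Besides adopting the _transport_ prefix for the static callbacks, the mpt2sas_transport.c hunks stop reading req->data_len directly in the SMP passthrough path and use the block layer accessor blk_rq_bytes() instead, reporting completion lengths through resid_len. In condensed before/after form (a sketch, not the driver's literal code):

	/* before the patch */
	mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
	req->data_len = 0;
	rsp->data_len -= mpi_reply->ResponseDataLength;

	/* after the patch */
	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
	req->resid_len = 0;
	rsp->resid_len -= mpi_reply->ResponseDataLength;
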
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1..00000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/sas_ata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "mvsas"
45#define DRV_VERSION "0.5.2"
46#define _MV_DUMP 0
47#define MVS_DISABLE_NVRAM
48#define MVS_DISABLE_MSI
49
50#define mr32(reg) readl(regs + MVS_##reg)
51#define mw32(reg,val) writel((val), regs + MVS_##reg)
52#define mw32_f(reg,val) do { \
53 writel((val), regs + MVS_##reg); \
54 readl(regs + MVS_##reg); \
55 } while (0)
56
57#define MVS_ID_NOT_MAPPED 0x7f
58#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
59
60/* offset for D2H FIS in the Received FIS List Structure */
61#define SATA_RECEIVED_D2H_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
63#define SATA_RECEIVED_PIO_FIS(reg_set) \
64 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
65#define UNASSOC_D2H_FIS(id) \
66 ((void *) mvi->rx_fis + 0x100 * id)
67
68#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
69 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
70 (__mc) != 0 && __rest; \
71 (++__lseq), (__mc) >>= 1)
72
73/* driver compile-time configuration */
74enum driver_configuration {
75 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
76 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
77 /* software requires power-of-2
78 ring size */
79
80 MVS_SLOTS = 512, /* command slots */
81 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
82 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
83 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
84 MVS_OAF_SZ = 64, /* Open address frame buffer size */
85
86 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
87
88 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
89 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
90};
91
92/* unchangeable hardware details */
93enum hardware_details {
94 MVS_MAX_PHYS = 8, /* max. possible phys */
95 MVS_MAX_PORTS = 8, /* max. possible ports */
96 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
97};
98
99/* peripheral registers (BAR2) */
100enum peripheral_registers {
101 SPI_CTL = 0x10, /* EEPROM control */
102 SPI_CMD = 0x14, /* EEPROM command */
103 SPI_DATA = 0x18, /* EEPROM data */
104};
105
106enum peripheral_register_bits {
107 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
108 TWSI_RD = (1U << 4), /* EEPROM read access */
109
110 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
111};
112
113/* enhanced mode registers (BAR4) */
114enum hw_registers {
115 MVS_GBL_CTL = 0x04, /* global control */
116 MVS_GBL_INT_STAT = 0x08, /* global irq status */
117 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
118 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
119
120 MVS_CTL = 0x100, /* SAS/SATA port configuration */
121 MVS_PCS = 0x104, /* SAS/SATA port control/status */
122 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
123 MVS_CMD_LIST_HI = 0x10C,
124 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
125 MVS_RX_FIS_HI = 0x114,
126
127 MVS_TX_CFG = 0x120, /* TX configuration */
128 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
129 MVS_TX_HI = 0x128,
130
131 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
132 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
133 MVS_RX_CFG = 0x134, /* RX configuration */
134 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
135 MVS_RX_HI = 0x13C,
136 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
137
138 MVS_INT_COAL = 0x148, /* Int coalescing config */
139 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
140 MVS_INT_STAT = 0x150, /* Central int status */
141 MVS_INT_MASK = 0x154, /* Central int enable */
142 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
143 MVS_INT_MASK_SRS = 0x15C,
144
145 /* ports 1-3 follow after this */
146 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
147 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
148 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
149 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
150
151 /* ports 1-3 follow after this */
152 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
153 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
154
155 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
156 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
157
158 /* ports 1-3 follow after this */
159 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
160 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
161 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
162 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
163
164 /* ports 1-3 follow after this */
165 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
166 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
167 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
168 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
169};
170
171enum hw_register_bits {
172 /* MVS_GBL_CTL */
173 INT_EN = (1U << 1), /* Global int enable */
174 HBA_RST = (1U << 0), /* HBA reset */
175
176 /* MVS_GBL_INT_STAT */
177 INT_XOR = (1U << 4), /* XOR engine event */
178 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
179
180 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
181 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
182 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
183 MODE_AUTO_DET_PORT6 = (1U << 14),
184 MODE_AUTO_DET_PORT5 = (1U << 13),
185 MODE_AUTO_DET_PORT4 = (1U << 12),
186 MODE_AUTO_DET_PORT3 = (1U << 11),
187 MODE_AUTO_DET_PORT2 = (1U << 10),
188 MODE_AUTO_DET_PORT1 = (1U << 9),
189 MODE_AUTO_DET_PORT0 = (1U << 8),
190 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
191 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
192 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
193 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
194 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
195 MODE_SAS_PORT6_MASK = (1U << 6),
196 MODE_SAS_PORT5_MASK = (1U << 5),
197 MODE_SAS_PORT4_MASK = (1U << 4),
198 MODE_SAS_PORT3_MASK = (1U << 3),
199 MODE_SAS_PORT2_MASK = (1U << 2),
200 MODE_SAS_PORT1_MASK = (1U << 1),
201 MODE_SAS_PORT0_MASK = (1U << 0),
202 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
203 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
204 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
205 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
206
207 /* SAS_MODE value may be
208 * dictated (in hw) by values
209 * of SATA_TARGET & AUTO_DET
210 */
211
212 /* MVS_TX_CFG */
213 TX_EN = (1U << 16), /* Enable TX */
214 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
215
216 /* MVS_RX_CFG */
217 RX_EN = (1U << 16), /* Enable RX */
218 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
219
220 /* MVS_INT_COAL */
221 COAL_EN = (1U << 16), /* Enable int coalescing */
222
223 /* MVS_INT_STAT, MVS_INT_MASK */
224 CINT_I2C = (1U << 31), /* I2C event */
225 CINT_SW0 = (1U << 30), /* software event 0 */
226 CINT_SW1 = (1U << 29), /* software event 1 */
227 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
228 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
229 CINT_MEM = (1U << 26), /* int mem parity err */
230 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
231 CINT_SRS = (1U << 3), /* SRS event */
232 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
233 CINT_DONE = (1U << 0), /* cmd completion */
234
235 /* shl for ports 1-3 */
236 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
237 CINT_PORT = (1U << 8), /* port0 event */
238 CINT_PORT_MASK_OFFSET = 8,
239 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
240
241 /* TX (delivery) ring bits */
242 TXQ_CMD_SHIFT = 29,
243 TXQ_CMD_SSP = 1, /* SSP protocol */
244 TXQ_CMD_SMP = 2, /* SMP protocol */
245 TXQ_CMD_STP = 3, /* STP/SATA protocol */
246 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
247 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
248 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
249 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
250 TXQ_SRS_SHIFT = 20, /* SATA register set */
251 TXQ_SRS_MASK = 0x7f,
252 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
253 TXQ_PHY_MASK = 0xff,
254 TXQ_SLOT_MASK = 0xfff, /* slot number */
255
256 /* RX (completion) ring bits */
257 RXQ_GOOD = (1U << 23), /* Response good */
258 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
259 RXQ_CMD_RX = (1U << 20), /* target cmd received */
260 RXQ_ATTN = (1U << 19), /* attention */
261 RXQ_RSP = (1U << 18), /* response frame xfer'd */
262 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
263 RXQ_DONE = (1U << 16), /* cmd complete */
264 RXQ_SLOT_MASK = 0xfff, /* slot number */
265
266 /* mvs_cmd_hdr bits */
267 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
268 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
269
270 /* SSP initiator only */
271 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
272
273 /* SSP initiator or target */
274 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
275
276 /* SSP target only */
277 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
278 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
279 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
280 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
281
282 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
283 MCH_FBURST = (1U << 11), /* first burst (SSP) */
284 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
285 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
286 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
287 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
288 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
289 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
290 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
291 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
292
293 CCTL_RST = (1U << 5), /* port logic reset */
294
295 /* 0(LSB first), 1(MSB first) */
296 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
297 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
298 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
299 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
300
301 /* MVS_Px_SER_CTLSTAT (per-phy control) */
302 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
303 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
304 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
305 PHY_RST = (1U << 0), /* phy reset */
306 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
307 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
308 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
309 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
310 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
311 PHY_READY_MASK = (1U << 20),
312
313 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
314 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
315 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
316 PHYEV_AN = (1U << 18), /* SATA async notification */
317 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
318 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
319 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
320 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
321 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
322 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
323 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
324 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
325 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
326 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
327 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
328 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
329 PHYEV_ID_DONE = (1U << 2), /* identify done */
330 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
331 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
332
333 /* MVS_PCS */
334 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
335 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
336 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
337 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
338 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
339 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
340 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
341 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
342 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
343 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
344
345 /* Port n Attached Device Info */
346 PORT_DEV_SSP_TRGT = (1U << 19),
347 PORT_DEV_SMP_TRGT = (1U << 18),
348 PORT_DEV_STP_TRGT = (1U << 17),
349 PORT_DEV_SSP_INIT = (1U << 11),
350 PORT_DEV_SMP_INIT = (1U << 10),
351 PORT_DEV_STP_INIT = (1U << 9),
352 PORT_PHY_ID_MASK = (0xFFU << 24),
353 PORT_DEV_TRGT_MASK = (0x7U << 17),
354 PORT_DEV_INIT_MASK = (0x7U << 9),
355 PORT_DEV_TYPE_MASK = (0x7U << 0),
356
357 /* Port n PHY Status */
358 PHY_RDY = (1U << 2),
359 PHY_DW_SYNC = (1U << 1),
360 PHY_OOB_DTCTD = (1U << 0),
361
362 /* VSR */
363 /* PHYMODE 6 (CDB) */
364 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
365 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
366 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
367 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
368 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
369 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
370 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
371 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
372 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
373 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
374 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
375 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
376 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
377 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
378};
379
380enum mvs_info_flags {
381 MVF_MSI = (1U << 0), /* MSI is enabled */
382 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
383};
384
385enum sas_cmd_port_registers {
386 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
387 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
388 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
389 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
390 CMD_OOB_SPACE = 0x110, /* OOB space control register */
391 CMD_OOB_BURST = 0x114, /* OOB burst control register */
392 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
393 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
394 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
395 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
396 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
397 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
398 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
399 CMD_ID_TEST = 0x134, /* ID test register */
400 CMD_PL_TIMER = 0x138, /* PL timer register */
401 CMD_WD_TIMER = 0x13c, /* WD timer register */
402 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
403 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
404 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
405 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
406 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
407 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
408 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
409 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
410 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
411	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
412 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
413 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
414 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
415 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
416 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
417 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
418 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
419 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
420 CMD_RESET_COUNT = 0x188, /* Reset Count */
421 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
422 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
423 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
424 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
425 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
426 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
427 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
428 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
429 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
430 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
431 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
432 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
433 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
434 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
435 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
436};
437
438/* SAS/SATA configuration port registers, aka phy registers */
439enum sas_sata_config_port_regs {
440 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
441 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
442 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
443 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
444 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
445 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
446 PHYR_SATA_CTL = 0x18, /* SATA control */
447 PHYR_PHY_STAT = 0x1C, /* PHY status */
448	PHYR_SATA_SIG0	= 0x20, /* port SATA signature FIS (bytes 0-3) */
449	PHYR_SATA_SIG1	= 0x24, /* port SATA signature FIS (bytes 4-7) */
450	PHYR_SATA_SIG2	= 0x28, /* port SATA signature FIS (bytes 8-11) */
451	PHYR_SATA_SIG3	= 0x2c, /* port SATA signature FIS (bytes 12-15) */
452 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
453 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
454 PHYR_WIDE_PORT = 0x38, /* wide port participating */
455 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
456 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
457 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
458};
459
460/* SAS/SATA Vendor Specific Port Registers */
461enum sas_sata_vsp_regs {
462 VSR_PHY_STAT = 0x00, /* Phy Status */
463 VSR_PHY_MODE1 = 0x01, /* phy tx */
464 VSR_PHY_MODE2 = 0x02, /* tx scc */
465 VSR_PHY_MODE3 = 0x03, /* pll */
466 VSR_PHY_MODE4 = 0x04, /* VCO */
467 VSR_PHY_MODE5 = 0x05, /* Rx */
468 VSR_PHY_MODE6 = 0x06, /* CDR */
469 VSR_PHY_MODE7 = 0x07, /* Impedance */
470 VSR_PHY_MODE8 = 0x08, /* Voltage */
471 VSR_PHY_MODE9 = 0x09, /* Test */
472 VSR_PHY_MODE10 = 0x0A, /* Power */
473 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
474	VSR_PHY_VS0	= 0x0C, /* Vendor Specific 0 */
475	VSR_PHY_VS1	= 0x0D, /* Vendor Specific 1 */
476};
477
478enum pci_cfg_registers {
479 PCR_PHY_CTL = 0x40,
480 PCR_PHY_CTL2 = 0x90,
481 PCR_DEV_CTRL = 0xE8,
482};
483
484enum pci_cfg_register_bits {
485 PCTL_PWR_ON = (0xFU << 24),
486 PCTL_OFF = (0xFU << 12),
487 PRD_REQ_SIZE = (0x4000),
488 PRD_REQ_MASK = (0x00007000),
489};
490
491enum nvram_layout_offsets {
492 NVR_SIG = 0x00, /* 0xAA, 0x55 */
493 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
494};
495
496enum chip_flavors {
497 chip_6320,
498 chip_6440,
499 chip_6480,
500};
501
502enum port_type {
503 PORT_TYPE_SAS = (1L << 1),
504 PORT_TYPE_SATA = (1L << 0),
505};
506
507/* Command Table Format */
508enum ct_format {
509 /* SSP */
510 SSP_F_H = 0x00,
511 SSP_F_IU = 0x18,
512 SSP_F_MAX = 0x4D,
513 /* STP */
514 STP_CMD_FIS = 0x00,
515 STP_ATAPI_CMD = 0x40,
516 STP_F_MAX = 0x10,
517 /* SMP */
518 SMP_F_T = 0x00,
519 SMP_F_DEP = 0x01,
520 SMP_F_MAX = 0x101,
521};
522
523enum status_buffer {
524 SB_EIR_OFF = 0x00, /* Error Information Record */
525 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
526 SB_RFB_MAX = 0x400, /* RFB size*/
527};
528
529enum error_info_rec {
530 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
531 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
532 RSP_OVER = (1U << 29), /* rsp buffer overflow */
533 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
534 UNK_FIS = (1U << 27), /* unknown FIS */
535 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
536 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
537 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
538 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
539 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
540 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
541 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
542 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
543 INTERLOCK = (1U << 15), /* interlock error */
544 NAK = (1U << 14), /* NAK rx'd */
545 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
546 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
547 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
548 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
549 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
550 STP_RES_BSY = (1U << 8), /* STP resources busy */
551 BREAK = (1U << 7), /* break received */
552 BAD_DEST = (1U << 6), /* bad destination */
553 BAD_PROTO = (1U << 5), /* protocol not supported */
554 BAD_RATE = (1U << 4), /* cxn rate not supported */
555 WRONG_DEST = (1U << 3), /* wrong destination error */
556 CREDIT_TO = (1U << 2), /* credit timeout */
557 WDOG_TO = (1U << 1), /* watchdog timeout */
558 BUF_PAR = (1U << 0), /* buffer parity error */
559};
560
561enum error_info_rec_2 {
562 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
563 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
564 APP_CHK_ERR = (1U << 13), /* Application Check error */
565 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
566 USR_BLK_NM = (1U << 0), /* User Block Number */
567};
568
569struct mvs_chip_info {
570 u32 n_phy;
571 u32 srs_sz;
572 u32 slot_width;
573};
574
575struct mvs_err_info {
576 __le32 flags;
577 __le32 flags2;
578};
579
580struct mvs_prd {
581 __le64 addr; /* 64-bit buffer address */
582 __le32 reserved;
583 __le32 len; /* 16-bit length */
584};
585
586struct mvs_cmd_hdr {
587 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
588 __le32 lens; /* cmd, max resp frame len */
589 __le32 tags; /* targ port xfer tag; tag */
590 __le32 data_len; /* data xfer len */
591 __le64 cmd_tbl; /* command table address */
592 __le64 open_frame; /* open addr frame address */
593 __le64 status_buf; /* status buffer address */
594 __le64 prd_tbl; /* PRD tbl address */
595 __le32 reserved[4];
596};
597
598struct mvs_port {
599 struct asd_sas_port sas_port;
600 u8 port_attached;
601 u8 taskfileset;
602 u8 wide_port_phymap;
603 struct list_head list;
604};
605
606struct mvs_phy {
607 struct mvs_port *port;
608 struct asd_sas_phy sas_phy;
609 struct sas_identify identify;
610 struct scsi_device *sdev;
611 u64 dev_sas_addr;
612 u64 att_dev_sas_addr;
613 u32 att_dev_info;
614 u32 dev_info;
615 u32 phy_type;
616 u32 phy_status;
617 u32 irq_status;
618 u32 frame_rcvd_size;
619 u8 frame_rcvd[32];
620 u8 phy_attached;
621 enum sas_linkrate minimum_linkrate;
622 enum sas_linkrate maximum_linkrate;
623};
624
625struct mvs_slot_info {
626 struct list_head list;
627 struct sas_task *task;
628 u32 n_elem;
629 u32 tx;
630
631 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
632 * and PRD table
633 */
634 void *buf;
635 dma_addr_t buf_dma;
636#if _MV_DUMP
637 u32 cmd_size;
638#endif
639
640 void *response;
641 struct mvs_port *port;
642};
643
644struct mvs_info {
645 unsigned long flags;
646
647 spinlock_t lock; /* host-wide lock */
648 struct pci_dev *pdev; /* our device */
649 void __iomem *regs; /* enhanced mode registers */
650 void __iomem *peri_regs; /* peripheral registers */
651
652 u8 sas_addr[SAS_ADDR_SIZE];
653 struct sas_ha_struct sas; /* SCSI/SAS glue */
654 struct Scsi_Host *shost;
655
656 __le32 *tx; /* TX (delivery) DMA ring */
657 dma_addr_t tx_dma;
658 u32 tx_prod; /* cached next-producer idx */
659
660 __le32 *rx; /* RX (completion) DMA ring */
661 dma_addr_t rx_dma;
662 u32 rx_cons; /* RX consumer idx */
663
664 __le32 *rx_fis; /* RX'd FIS area */
665 dma_addr_t rx_fis_dma;
666
667 struct mvs_cmd_hdr *slot; /* DMA command header slots */
668 dma_addr_t slot_dma;
669
670 const struct mvs_chip_info *chip;
671
672 u8 tags[MVS_SLOTS];
673 struct mvs_slot_info slot_info[MVS_SLOTS];
674 /* further per-slot information */
675 struct mvs_phy phy[MVS_MAX_PHYS];
676 struct mvs_port port[MVS_MAX_PHYS];
677#ifdef MVS_USE_TASKLET
678 struct tasklet_struct tasklet;
679#endif
680};
681
682static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
683 void *funcdata);
684static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
685static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
686static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
687static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
688static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
689static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
690
691static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
692static void mvs_detect_porttype(struct mvs_info *mvi, int i);
693static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
694static void mvs_release_task(struct mvs_info *mvi, int phy_no);
695
696static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
697static void mvs_scan_start(struct Scsi_Host *);
698static int mvs_slave_configure(struct scsi_device *sdev);
699
700static struct scsi_transport_template *mvs_stt;
701
702static const struct mvs_chip_info mvs_chips[] = {
703 [chip_6320] = { 2, 16, 9 },
704 [chip_6440] = { 4, 16, 9 },
705 [chip_6480] = { 8, 32, 10 },
706};
707
708static struct scsi_host_template mvs_sht = {
709 .module = THIS_MODULE,
710 .name = DRV_NAME,
711 .queuecommand = sas_queuecommand,
712 .target_alloc = sas_target_alloc,
713 .slave_configure = mvs_slave_configure,
714 .slave_destroy = sas_slave_destroy,
715 .scan_finished = mvs_scan_finished,
716 .scan_start = mvs_scan_start,
717 .change_queue_depth = sas_change_queue_depth,
718 .change_queue_type = sas_change_queue_type,
719 .bios_param = sas_bios_param,
720 .can_queue = 1,
721 .cmd_per_lun = 1,
722 .this_id = -1,
723 .sg_tablesize = SG_ALL,
724 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
725 .use_clustering = ENABLE_CLUSTERING,
726 .eh_device_reset_handler = sas_eh_device_reset_handler,
727 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
728 .slave_alloc = sas_slave_alloc,
729 .target_destroy = sas_target_destroy,
730 .ioctl = sas_ioctl,
731};
732
733static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
734{
735 u32 i;
736 u32 run;
737 u32 offset;
738
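	/* dump 'size' bytes in 16-byte rows: address, hex values, then
	 * alphanumeric ASCII (other bytes shown as '.')
	 */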
739 offset = 0;
740 while (size) {
741 printk("%08X : ", baseaddr + offset);
742 if (size >= 16)
743 run = 16;
744 else
745 run = size;
746 size -= run;
747 for (i = 0; i < 16; i++) {
748 if (i < run)
749 printk("%02X ", (u32)data[i]);
750 else
751 printk(" ");
752 }
753 printk(": ");
754 for (i = 0; i < run; i++)
755 printk("%c", isalnum(data[i]) ? data[i] : '.');
756 printk("\n");
757 data = &data[16];
758 offset += run;
759 }
760 printk("\n");
761}
762
763#if _MV_DUMP
764static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
765 enum sas_protocol proto)
766{
767 u32 offset;
768 struct pci_dev *pdev = mvi->pdev;
769 struct mvs_slot_info *slot = &mvi->slot_info[tag];
770
771 offset = slot->cmd_size + MVS_OAF_SZ +
772 sizeof(struct mvs_prd) * slot->n_elem;
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
774 tag);
775 mvs_hexdump(32, (u8 *) slot->response,
776 (u32) slot->buf_dma + offset);
777}
778#endif
779
780static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
781 enum sas_protocol proto)
782{
783#if _MV_DUMP
784 u32 sz, w_ptr;
785 u64 addr;
786 void __iomem *regs = mvi->regs;
787 struct pci_dev *pdev = mvi->pdev;
788 struct mvs_slot_info *slot = &mvi->slot_info[tag];
789
790 /*Delivery Queue */
791 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
792 w_ptr = slot->tx;
793 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
794 dev_printk(KERN_DEBUG, &pdev->dev,
795 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
796 dev_printk(KERN_DEBUG, &pdev->dev,
797 "Delivery Queue Base Address=0x%llX (PA)"
798 "(tx_dma=0x%llX), Entry=%04d\n",
799 addr, mvi->tx_dma, w_ptr);
800 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
801 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
802 /*Command List */
803 addr = mvi->slot_dma;
804 dev_printk(KERN_DEBUG, &pdev->dev,
805 "Command List Base Address=0x%llX (PA)"
806 "(slot_dma=0x%llX), Header=%03d\n",
807 addr, slot->buf_dma, tag);
808 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
809 /*mvs_cmd_hdr */
810 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
811 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
812 /*1.command table area */
813 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
814 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
815 /*2.open address frame area */
816 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
817 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
818 (u32) slot->buf_dma + slot->cmd_size);
819 /*3.status buffer */
820 mvs_hba_sb_dump(mvi, tag, proto);
821 /*4.PRD table */
822 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
823 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
824 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
825 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
826#endif
827}
828
829static void mvs_hba_cq_dump(struct mvs_info *mvi)
830{
831#if (_MV_DUMP > 2)
832 u64 addr;
833 void __iomem *regs = mvi->regs;
834 struct pci_dev *pdev = mvi->pdev;
835 u32 entry = mvi->rx_cons + 1;
836 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
837
838 /*Completion Queue */
839 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
840 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
841 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
842 dev_printk(KERN_DEBUG, &pdev->dev,
843 "Completion List Base Address=0x%llX (PA), "
844 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
845 addr, entry - 1, mvi->rx[0]);
846 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
847 mvi->rx_dma + sizeof(u32) * entry);
848#endif
849}
850
851static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
852{
853 void __iomem *regs = mvi->regs;
854 u32 tmp;
855
856 tmp = mr32(GBL_CTL);
857
858 mw32(GBL_CTL, tmp | INT_EN);
859}
860
861static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
862{
863 void __iomem *regs = mvi->regs;
864 u32 tmp;
865
866 tmp = mr32(GBL_CTL);
867
868 mw32(GBL_CTL, tmp & ~INT_EN);
869}
870
871static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
872
873/* move to PCI layer or libata core? */
874static int pci_go_64(struct pci_dev *pdev)
875{
876 int rc;
877
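	/* prefer 64-bit DMA masks; drop the consistent (coherent) mask to
	 * 32-bit if needed, or fall back to 32-bit for both masks when
	 * 64-bit streaming DMA cannot be enabled
	 */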
878 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
879 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
880 if (rc) {
881 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
882 if (rc) {
883 dev_printk(KERN_ERR, &pdev->dev,
884 "64-bit DMA enable failed\n");
885 return rc;
886 }
887 }
888 } else {
889 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
890 if (rc) {
891 dev_printk(KERN_ERR, &pdev->dev,
892 "32-bit DMA enable failed\n");
893 return rc;
894 }
895 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
896 if (rc) {
897 dev_printk(KERN_ERR, &pdev->dev,
898 "32-bit consistent DMA enable failed\n");
899 return rc;
900 }
901 }
902
903 return rc;
904}
905
906static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
907{
908 if (task->lldd_task) {
909 struct mvs_slot_info *slot;
910 slot = (struct mvs_slot_info *) task->lldd_task;
911 *tag = slot - mvi->slot_info;
912 return 1;
913 }
914 return 0;
915}
916
917static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
918{
919 void *bitmap = (void *) &mvi->tags;
920 clear_bit(tag, bitmap);
921}
922
923static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
924{
925 mvs_tag_clear(mvi, tag);
926}
927
928static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
929{
930 void *bitmap = (void *) &mvi->tags;
931 set_bit(tag, bitmap);
932}
933
934static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
935{
936 unsigned int index, tag;
937 void *bitmap = (void *) &mvi->tags;
938
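	/* grab the first free slot in the tag bitmap; report queue-full if
	 * none is left
	 */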
939 index = find_first_zero_bit(bitmap, MVS_SLOTS);
940 tag = index;
941 if (tag >= MVS_SLOTS)
942 return -SAS_QUEUE_FULL;
943 mvs_tag_set(mvi, tag);
944 *tag_out = tag;
945 return 0;
946}
947
948static void mvs_tag_init(struct mvs_info *mvi)
949{
950 int i;
951 for (i = 0; i < MVS_SLOTS; ++i)
952 mvs_tag_clear(mvi, i);
953}
954
955#ifndef MVS_DISABLE_NVRAM
956static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
957{
958 int timeout = 1000;
959
960 if (addr & ~SPI_ADDR_MASK)
961 return -EINVAL;
962
963 writel(addr, regs + SPI_CMD);
964 writel(TWSI_RD, regs + SPI_CTL);
965
966 while (timeout-- > 0) {
967 if (readl(regs + SPI_CTL) & TWSI_RDY) {
968 *data = readl(regs + SPI_DATA);
969 return 0;
970 }
971
972 udelay(10);
973 }
974
975 return -EBUSY;
976}
977
978static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
979 void *buf, u32 buflen)
980{
981 u32 addr_end, tmp_addr, i, j;
982 u32 tmp = 0;
983 int rc;
984 u8 *tmp8, *buf8 = buf;
985
986 addr_end = addr + buflen;
987 tmp_addr = ALIGN(addr, 4);
988 if (addr > 0xff)
989 return -EINVAL;
990
991 j = addr & 0x3;
992 if (j) {
993 rc = mvs_eep_read(regs, tmp_addr, &tmp);
994 if (rc)
995 return rc;
996
997 tmp8 = (u8 *)&tmp;
998 for (i = j; i < 4; i++)
999 *buf8++ = tmp8[i];
1000
1001 tmp_addr += 4;
1002 }
1003
1004 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1005 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1006 if (rc)
1007 return rc;
1008
1009 memcpy(buf8, &tmp, 4);
1010 buf8 += 4;
1011 }
1012
1013 if (tmp_addr < addr_end) {
1014 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1015 if (rc)
1016 return rc;
1017
1018 tmp8 = (u8 *)&tmp;
1019 j = addr_end - tmp_addr;
1020 for (i = 0; i < j; i++)
1021 *buf8++ = tmp8[i];
1022
1023 tmp_addr += 4;
1024 }
1025
1026 return 0;
1027}
1028#endif
1029
1030static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
1031 void *buf, u32 buflen)
1032{
1033#ifndef MVS_DISABLE_NVRAM
1034 void __iomem *regs = mvi->regs;
1035 int rc, i;
1036 u32 sum;
1037 u8 hdr[2], *tmp;
1038 const char *msg;
1039
1040 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1041 if (rc) {
1042 msg = "nvram hdr read failed";
1043 goto err_out;
1044 }
1045 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1046 if (rc) {
1047 msg = "nvram read failed";
1048 goto err_out;
1049 }
1050
1051 if (hdr[0] != 0x5A) {
1052 /* entry id */
1053 msg = "invalid nvram entry id";
1054 rc = -ENOENT;
1055 goto err_out;
1056 }
1057
1058 tmp = buf;
1059 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1060 for (i = 0; i < buflen; i++)
1061 sum += ((u32)tmp[i]);
1062
1063 if (sum) {
1064 msg = "nvram checksum failure";
1065 rc = -EILSEQ;
1066 goto err_out;
1067 }
1068
1069 return 0;
1070
1071err_out:
1072 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1073 return rc;
1074#else
1075	/* FIXME: for SAS target mode */
1076 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1077 return 0;
1078#endif
1079}
1080
1081static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1082{
1083 struct mvs_phy *phy = &mvi->phy[i];
1084 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
1085
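	/* hand the received identify/signature frame for this phy to libsas
	 * and raise a BYTES_DMAED port event
	 */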
1086 if (!phy->phy_attached)
1087 return;
1088
1089 if (sas_phy->phy) {
1090 struct sas_phy *sphy = sas_phy->phy;
1091
1092 sphy->negotiated_linkrate = sas_phy->linkrate;
1093 sphy->minimum_linkrate = phy->minimum_linkrate;
1094 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
1095 sphy->maximum_linkrate = phy->maximum_linkrate;
1096 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
1097 }
1098
1099 if (phy->phy_type & PORT_TYPE_SAS) {
1100 struct sas_identify_frame *id;
1101
1102 id = (struct sas_identify_frame *)phy->frame_rcvd;
1103 id->dev_type = phy->identify.device_type;
1104 id->initiator_bits = SAS_PROTOCOL_ALL;
1105 id->target_bits = phy->identify.target_port_protocols;
1106 } else if (phy->phy_type & PORT_TYPE_SATA) {
1107 /* TODO */
1108 }
1109 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1110 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1111 PORTE_BYTES_DMAED);
1112}
1113
1114static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1115{
1116 /* give the phy enabling interrupt event time to come in (1s
1117 * is empirically about all it takes) */
1118 if (time < HZ)
1119 return 0;
1120 /* Wait for discovery to finish */
1121 scsi_flush_work(shost);
1122 return 1;
1123}
1124
1125static void mvs_scan_start(struct Scsi_Host *shost)
1126{
1127 int i;
1128 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1129
1130 for (i = 0; i < mvi->chip->n_phy; ++i) {
1131 mvs_bytes_dmaed(mvi, i);
1132 }
1133}
1134
1135static int mvs_slave_configure(struct scsi_device *sdev)
1136{
1137 struct domain_device *dev = sdev_to_domain_dev(sdev);
1138 int ret = sas_slave_configure(sdev);
1139
1140 if (ret)
1141 return ret;
1142
1143 if (dev_is_sata(dev)) {
1144 /* struct ata_port *ap = dev->sata_dev.ap; */
1145 /* struct ata_device *adev = ap->link.device; */
1146
1147 /* clamp at no NCQ for the time being */
1148 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
1149 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
1150 }
1151 return 0;
1152}
1153
1154static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
1155{
1156 struct pci_dev *pdev = mvi->pdev;
1157 struct sas_ha_struct *sas_ha = &mvi->sas;
1158 struct mvs_phy *phy = &mvi->phy[phy_no];
1159 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1160
1161 phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
1162	/*
1163	 * 'events' carries only the summary port event bits; check the
1164	 * per-port interrupt status to see what actually happened.
1165	 */
1166 dev_printk(KERN_DEBUG, &pdev->dev,
1167 "Port %d Event = %X\n",
1168 phy_no, phy->irq_status);
1169
1170 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1171 mvs_release_task(mvi, phy_no);
1172 if (!mvs_is_phy_ready(mvi, phy_no)) {
1173 sas_phy_disconnected(sas_phy);
1174 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1175 dev_printk(KERN_INFO, &pdev->dev,
1176 "Port %d Unplug Notice\n", phy_no);
1177
1178 } else
1179 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1180 }
1181 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1182 if (phy->irq_status & PHYEV_COMWAKE) {
1183 u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
1184 mvs_write_port_irq_mask(mvi, phy_no,
1185 tmp | PHYEV_SIG_FIS);
1186 }
1187 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1188 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
1189 if (phy->phy_status) {
1190 mvs_detect_porttype(mvi, phy_no);
1191
1192 if (phy->phy_type & PORT_TYPE_SATA) {
1193 u32 tmp = mvs_read_port_irq_mask(mvi,
1194 phy_no);
1195 tmp &= ~PHYEV_SIG_FIS;
1196 mvs_write_port_irq_mask(mvi,
1197 phy_no, tmp);
1198 }
1199
1200 mvs_update_phyinfo(mvi, phy_no, 0);
1201 sas_ha->notify_phy_event(sas_phy,
1202 PHYE_OOB_DONE);
1203 mvs_bytes_dmaed(mvi, phy_no);
1204 } else {
1205 dev_printk(KERN_DEBUG, &pdev->dev,
1206 "plugin interrupt but phy is gone\n");
1207 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1208 NULL);
1209 }
1210 } else if (phy->irq_status & PHYEV_BROAD_CH) {
1211 mvs_release_task(mvi, phy_no);
1212 sas_ha->notify_port_event(sas_phy,
1213 PORTE_BROADCAST_RCVD);
1214 }
1215 }
1216 mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
1217}
1218
1219static void mvs_int_sata(struct mvs_info *mvi)
1220{
1221 u32 tmp;
1222 void __iomem *regs = mvi->regs;
1223 tmp = mr32(INT_STAT_SRS);
1224 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1225}
1226
1227static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1228 u32 slot_idx)
1229{
1230 void __iomem *regs = mvi->regs;
1231 struct domain_device *dev = task->dev;
1232 struct asd_sas_port *sas_port = dev->port;
1233 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1234 u32 reg_set, phy_mask;
1235
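	/* queue a SLOT_RESET entry on the delivery ring for this slot,
	 * carrying the port's phy mask and, for ATA tasks, its SATA
	 * register set
	 */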
1236 if (!sas_protocol_ata(task->task_proto)) {
1237 reg_set = 0;
1238 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1239 sas_port->phy_mask;
1240 } else {
1241 reg_set = port->taskfileset;
1242 phy_mask = sas_port->phy_mask;
1243 }
1244 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1245 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1246 (phy_mask << TXQ_PHY_SHIFT) |
1247 (reg_set << TXQ_SRS_SHIFT));
1248
1249 mw32(TX_PROD_IDX, mvi->tx_prod);
1250 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1251}
1252
1253static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1254 u32 slot_idx, int err)
1255{
1256 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1257 struct task_status_struct *tstat = &task->task_status;
1258 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1259 int stat = SAM_GOOD;
1260
1261 resp->frame_len = sizeof(struct dev_to_host_fis);
1262 memcpy(&resp->ending_fis[0],
1263 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1264 sizeof(struct dev_to_host_fis));
1265 tstat->buf_valid_size = sizeof(*resp);
1266 if (unlikely(err))
1267 stat = SAS_PROTO_RESPONSE;
1268 return stat;
1269}
1270
1271static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1272{
1273 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1274 mvs_tag_clear(mvi, slot_idx);
1275}
1276
1277static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1278 struct mvs_slot_info *slot, u32 slot_idx)
1279{
1280 if (!sas_protocol_ata(task->task_proto))
1281 if (slot->n_elem)
1282 pci_unmap_sg(mvi->pdev, task->scatter,
1283 slot->n_elem, task->data_dir);
1284
1285 switch (task->task_proto) {
1286 case SAS_PROTOCOL_SMP:
1287 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1288 PCI_DMA_FROMDEVICE);
1289 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1290 PCI_DMA_TODEVICE);
1291 break;
1292
1293 case SAS_PROTOCOL_SATA:
1294 case SAS_PROTOCOL_STP:
1295 case SAS_PROTOCOL_SSP:
1296 default:
1297 /* do nothing */
1298 break;
1299 }
1300 list_del(&slot->list);
1301 task->lldd_task = NULL;
1302 slot->task = NULL;
1303 slot->port = NULL;
1304}
1305
1306static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1307 u32 slot_idx)
1308{
1309 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1310 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1311 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
1312 int stat = SAM_CHECK_COND;
1313
1314 if (err_dw1 & SLOT_BSY_ERR) {
1315 stat = SAS_QUEUE_FULL;
1316 mvs_slot_reset(mvi, task, slot_idx);
1317 }
1318 switch (task->task_proto) {
1319 case SAS_PROTOCOL_SSP:
1320 break;
1321 case SAS_PROTOCOL_SMP:
1322 break;
1323 case SAS_PROTOCOL_SATA:
1324 case SAS_PROTOCOL_STP:
1325 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1326 if (err_dw0 & TFILE_ERR)
1327 stat = mvs_sata_done(mvi, task, slot_idx, 1);
1328 break;
1329 default:
1330 break;
1331 }
1332
1333 mvs_hexdump(16, (u8 *) slot->response, 0);
1334 return stat;
1335}
1336
1337static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1338{
1339 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1340 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1341 struct sas_task *task = slot->task;
1342 struct task_status_struct *tstat;
1343 struct mvs_port *port;
1344 bool aborted;
1345 void *to;
1346
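	/* complete one RX (completion) ring entry: translate the hardware
	 * status into the libsas task status, free the slot resources and
	 * invoke task_done
	 */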
1347 if (unlikely(!task || !task->lldd_task))
1348 return -1;
1349
1350 mvs_hba_cq_dump(mvi);
1351
1352 spin_lock(&task->task_state_lock);
1353 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1354 if (!aborted) {
1355 task->task_state_flags &=
1356 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1357 task->task_state_flags |= SAS_TASK_STATE_DONE;
1358 }
1359 spin_unlock(&task->task_state_lock);
1360
1361 if (aborted) {
1362 mvs_slot_task_free(mvi, task, slot, slot_idx);
1363 mvs_slot_free(mvi, rx_desc);
1364 return -1;
1365 }
1366
1367 port = slot->port;
1368 tstat = &task->task_status;
1369 memset(tstat, 0, sizeof(*tstat));
1370 tstat->resp = SAS_TASK_COMPLETE;
1371
1372 if (unlikely(!port->port_attached || flags)) {
1373 mvs_slot_err(mvi, task, slot_idx);
1374 if (!sas_protocol_ata(task->task_proto))
1375 tstat->stat = SAS_PHY_DOWN;
1376 goto out;
1377 }
1378
1379 /* error info record present */
1380 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1381 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1382 goto out;
1383 }
1384
1385 switch (task->task_proto) {
1386 case SAS_PROTOCOL_SSP:
1387 /* hw says status == 0, datapres == 0 */
1388 if (rx_desc & RXQ_GOOD) {
1389 tstat->stat = SAM_GOOD;
1390 tstat->resp = SAS_TASK_COMPLETE;
1391 }
1392 /* response frame present */
1393 else if (rx_desc & RXQ_RSP) {
1394 struct ssp_response_iu *iu =
1395 slot->response + sizeof(struct mvs_err_info);
1396 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1397 }
1398
1399 /* should never happen? */
1400 else
1401 tstat->stat = SAM_CHECK_COND;
1402 break;
1403
1404 case SAS_PROTOCOL_SMP: {
1405 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1406 tstat->stat = SAM_GOOD;
1407 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1408 memcpy(to + sg_resp->offset,
1409 slot->response + sizeof(struct mvs_err_info),
1410 sg_dma_len(sg_resp));
1411 kunmap_atomic(to, KM_IRQ0);
1412 break;
1413 }
1414
1415 case SAS_PROTOCOL_SATA:
1416 case SAS_PROTOCOL_STP:
1417 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1418 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1419 break;
1420 }
1421
1422 default:
1423 tstat->stat = SAM_CHECK_COND;
1424 break;
1425 }
1426
1427out:
1428 mvs_slot_task_free(mvi, task, slot, slot_idx);
1429 if (unlikely(tstat->stat != SAS_QUEUE_FULL))
1430 mvs_slot_free(mvi, rx_desc);
1431
1432 spin_unlock(&mvi->lock);
1433 task->task_done(task);
1434 spin_lock(&mvi->lock);
1435 return tstat->stat;
1436}
1437
1438static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1439{
1440 struct list_head *pos, *n;
1441 struct mvs_slot_info *slot;
1442 struct mvs_phy *phy = &mvi->phy[phy_no];
1443 struct mvs_port *port = phy->port;
1444 u32 rx_desc;
1445
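	/* force-complete every slot still queued on this port, e.g. after
	 * the phy went away
	 */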
1446 if (!port)
1447 return;
1448
1449 list_for_each_safe(pos, n, &port->list) {
1450 slot = container_of(pos, struct mvs_slot_info, list);
1451 rx_desc = (u32) (slot - mvi->slot_info);
1452 mvs_slot_complete(mvi, rx_desc, 1);
1453 }
1454}
1455
1456static void mvs_int_full(struct mvs_info *mvi)
1457{
1458 void __iomem *regs = mvi->regs;
1459 u32 tmp, stat;
1460 int i;
1461
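	/* drain the completion ring, dispatch per-port phy/port events,
	 * handle SATA SRS interrupts, then ack the summary status
	 */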
1462 stat = mr32(INT_STAT);
1463
1464 mvs_int_rx(mvi, false);
1465
1466 for (i = 0; i < MVS_MAX_PORTS; i++) {
1467 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1468 if (tmp)
1469 mvs_int_port(mvi, i, tmp);
1470 }
1471
1472 if (stat & CINT_SRS)
1473 mvs_int_sata(mvi);
1474
1475 mw32(INT_STAT, stat);
1476}
1477
1478static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1479{
1480 void __iomem *regs = mvi->regs;
1481 u32 rx_prod_idx, rx_desc;
1482 bool attn = false;
1483 struct pci_dev *pdev = mvi->pdev;
1484
1485 /* the first dword in the RX ring is special: it contains
1486 * a mirror of the hardware's RX producer index, so that
1487 * we don't have to stall the CPU reading that register.
1488 * The actual RX ring is offset by one dword, due to this.
1489 */
1490 rx_prod_idx = mvi->rx_cons;
1491 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
1492 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
1493 return 0;
1494
1495	/* The completion queue update may arrive late; re-read the consumer
1496	 * index from the register and try again.  Note: with interrupt
1497	 * coalescing enabled, the register must be read every time.
1498	 */
1499 if (mvi->rx_cons == rx_prod_idx)
1500 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1501
1502 if (mvi->rx_cons == rx_prod_idx)
1503 return 0;
1504
1505 while (mvi->rx_cons != rx_prod_idx) {
1506
1507 /* increment our internal RX consumer pointer */
1508 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
1509
1510 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
1511
1512 if (likely(rx_desc & RXQ_DONE))
1513 mvs_slot_complete(mvi, rx_desc, 0);
1514 if (rx_desc & RXQ_ATTN) {
1515 attn = true;
1516 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1517 rx_desc);
1518 } else if (rx_desc & RXQ_ERR) {
1519 if (!(rx_desc & RXQ_DONE))
1520 mvs_slot_complete(mvi, rx_desc, 0);
1521 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1522 rx_desc);
1523 } else if (rx_desc & RXQ_SLOT_RESET) {
1524 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1525 rx_desc);
1526 mvs_slot_free(mvi, rx_desc);
1527 }
1528 }
1529
1530 if (attn && self_clear)
1531 mvs_int_full(mvi);
1532
1533 return 0;
1534}
1535
1536#ifdef MVS_USE_TASKLET
1537static void mvs_tasklet(unsigned long data)
1538{
1539 struct mvs_info *mvi = (struct mvs_info *) data;
1540 unsigned long flags;
1541
1542 spin_lock_irqsave(&mvi->lock, flags);
1543
1544#ifdef MVS_DISABLE_MSI
1545 mvs_int_full(mvi);
1546#else
1547 mvs_int_rx(mvi, true);
1548#endif
1549 spin_unlock_irqrestore(&mvi->lock, flags);
1550}
1551#endif
1552
1553static irqreturn_t mvs_interrupt(int irq, void *opaque)
1554{
1555 struct mvs_info *mvi = opaque;
1556 void __iomem *regs = mvi->regs;
1557 u32 stat;
1558
1559 stat = mr32(GBL_INT_STAT);
1560
1561 if (stat == 0 || stat == 0xffffffff)
1562 return IRQ_NONE;
1563
1564 /* clear CMD_CMPLT ASAP */
1565 mw32_f(INT_STAT, CINT_DONE);
1566
1567#ifndef MVS_USE_TASKLET
1568 spin_lock(&mvi->lock);
1569
1570 mvs_int_full(mvi);
1571
1572 spin_unlock(&mvi->lock);
1573#else
1574 tasklet_schedule(&mvi->tasklet);
1575#endif
1576 return IRQ_HANDLED;
1577}
1578
1579#ifndef MVS_DISABLE_MSI
1580static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1581{
1582 struct mvs_info *mvi = opaque;
1583
1584#ifndef MVS_USE_TASKLET
1585 spin_lock(&mvi->lock);
1586
1587 mvs_int_rx(mvi, true);
1588
1589 spin_unlock(&mvi->lock);
1590#else
1591 tasklet_schedule(&mvi->tasklet);
1592#endif
1593 return IRQ_HANDLED;
1594}
1595#endif
1596
1597struct mvs_task_exec_info {
1598 struct sas_task *task;
1599 struct mvs_cmd_hdr *hdr;
1600 struct mvs_port *port;
1601 u32 tag;
1602 int n_elem;
1603};
1604
1605static int mvs_task_prep_smp(struct mvs_info *mvi,
1606 struct mvs_task_exec_info *tei)
1607{
1608 int elem, rc, i;
1609 struct sas_task *task = tei->task;
1610 struct mvs_cmd_hdr *hdr = tei->hdr;
1611 struct scatterlist *sg_req, *sg_resp;
1612 u32 req_len, resp_len, tag = tei->tag;
1613 void *buf_tmp;
1614 u8 *buf_oaf;
1615 dma_addr_t buf_tmp_dma;
1616 struct mvs_prd *buf_prd;
1617 struct scatterlist *sg;
1618 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1619 struct asd_sas_port *sas_port = task->dev->port;
1620 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1621#if _MV_DUMP
1622 u8 *buf_cmd;
1623 void *from;
1624#endif
1625 /*
1626 * DMA-map SMP request, response buffers
1627 */
1628 sg_req = &task->smp_task.smp_req;
1629 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1630 if (!elem)
1631 return -ENOMEM;
1632 req_len = sg_dma_len(sg_req);
1633
1634 sg_resp = &task->smp_task.smp_resp;
1635 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1636 if (!elem) {
1637 rc = -ENOMEM;
1638 goto err_out;
1639 }
1640 resp_len = sg_dma_len(sg_resp);
1641
1642 /* must be in dwords */
1643 if ((req_len & 0x3) || (resp_len & 0x3)) {
1644 rc = -EINVAL;
1645 goto err_out_2;
1646 }
1647
1648 /*
1649 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1650 */
1651
1652 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1653 buf_tmp = slot->buf;
1654 buf_tmp_dma = slot->buf_dma;
1655
1656#if _MV_DUMP
1657 buf_cmd = buf_tmp;
1658 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1659 buf_tmp += req_len;
1660 buf_tmp_dma += req_len;
1661 slot->cmd_size = req_len;
1662#else
1663 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1664#endif
1665
1666 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1667 buf_oaf = buf_tmp;
1668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1669
1670 buf_tmp += MVS_OAF_SZ;
1671 buf_tmp_dma += MVS_OAF_SZ;
1672
1673 /* region 3: PRD table ********************************************* */
1674 buf_prd = buf_tmp;
1675 if (tei->n_elem)
1676 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1677 else
1678 hdr->prd_tbl = 0;
1679
1680 i = sizeof(struct mvs_prd) * tei->n_elem;
1681 buf_tmp += i;
1682 buf_tmp_dma += i;
1683
1684	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) ****** */
1685 slot->response = buf_tmp;
1686 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1687
1688 /*
1689 * Fill in TX ring and command slot header
1690 */
1691 slot->tx = mvi->tx_prod;
1692 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1693 TXQ_MODE_I | tag |
1694 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1695
1696 hdr->flags |= flags;
1697 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1698 hdr->tags = cpu_to_le32(tag);
1699 hdr->data_len = 0;
1700
1701 /* generate open address frame hdr (first 12 bytes) */
1702 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1703 buf_oaf[1] = task->dev->linkrate & 0xf;
1704 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1705 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1706
1707 /* fill in PRD (scatter/gather) table, if any */
1708 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1709 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1710 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1711 buf_prd++;
1712 }
1713
1714#if _MV_DUMP
1715 /* copy cmd table */
1716 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1717 memcpy(buf_cmd, from + sg_req->offset, req_len);
1718 kunmap_atomic(from, KM_IRQ0);
1719#endif
1720 return 0;
1721
1722err_out_2:
1723 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1724 PCI_DMA_FROMDEVICE);
1725err_out:
1726 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1727 PCI_DMA_TODEVICE);
1728 return rc;
1729}
1730
1731static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1732{
1733 void __iomem *regs = mvi->regs;
1734 u32 tmp, offs;
1735 u8 *tfs = &port->taskfileset;
1736
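	/* release the SATA register set held by this port: clear its enable
	 * bit (PCS for sets 0-15, CTL above) and ack any stale SRS interrupt
	 */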
1737 if (*tfs == MVS_ID_NOT_MAPPED)
1738 return;
1739
1740 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1741 if (*tfs < 16) {
1742 tmp = mr32(PCS);
1743 mw32(PCS, tmp & ~offs);
1744 } else {
1745 tmp = mr32(CTL);
1746 mw32(CTL, tmp & ~offs);
1747 }
1748
1749 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1750 if (tmp)
1751 mw32(INT_STAT_SRS, tmp);
1752
1753 *tfs = MVS_ID_NOT_MAPPED;
1754}
1755
1756static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1757{
1758 int i;
1759 u32 tmp, offs;
1760 void __iomem *regs = mvi->regs;
1761
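	/* claim the first free SATA register set for this port; sets 0-15
	 * are enabled via the PCS register, higher sets via CTL
	 */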
1762 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1763 return 0;
1764
1765 tmp = mr32(PCS);
1766
1767 for (i = 0; i < mvi->chip->srs_sz; i++) {
1768 if (i == 16)
1769 tmp = mr32(CTL);
1770 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1771 if (!(tmp & offs)) {
1772 port->taskfileset = i;
1773
1774 if (i < 16)
1775 mw32(PCS, tmp | offs);
1776 else
1777 mw32(CTL, tmp | offs);
1778 tmp = mr32(INT_STAT_SRS) & (1U << i);
1779 if (tmp)
1780 mw32(INT_STAT_SRS, tmp);
1781 return 0;
1782 }
1783 }
1784 return MVS_ID_NOT_MAPPED;
1785}
1786
1787static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
1788{
1789 struct ata_queued_cmd *qc = task->uldd_task;
1790
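	/* reuse libata's queued-command tag for FPDMA (NCQ) reads and writes */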
1791 if (qc) {
1792 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1793 qc->tf.command == ATA_CMD_FPDMA_READ) {
1794 *tag = qc->tag;
1795 return 1;
1796 }
1797 }
1798
1799 return 0;
1800}
1801
1802static int mvs_task_prep_ata(struct mvs_info *mvi,
1803 struct mvs_task_exec_info *tei)
1804{
1805 struct sas_task *task = tei->task;
1806 struct domain_device *dev = task->dev;
1807 struct mvs_cmd_hdr *hdr = tei->hdr;
1808 struct asd_sas_port *sas_port = dev->port;
1809 struct mvs_slot_info *slot;
1810 struct scatterlist *sg;
1811 struct mvs_prd *buf_prd;
1812 struct mvs_port *port = tei->port;
1813 u32 tag = tei->tag;
1814 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1815 void *buf_tmp;
1816 u8 *buf_cmd, *buf_oaf;
1817 dma_addr_t buf_tmp_dma;
1818 u32 i, req_len, resp_len;
1819 const u32 max_resp_len = SB_RFB_MAX;
1820
1821 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1822 return -EBUSY;
1823
1824 slot = &mvi->slot_info[tag];
1825 slot->tx = mvi->tx_prod;
1826 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1827 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1828 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1829 (port->taskfileset << TXQ_SRS_SHIFT));
1830
1831 if (task->ata_task.use_ncq)
1832 flags |= MCH_FPDMA;
1833 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1834 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1835 flags |= MCH_ATAPI;
1836 }
1837
1838 /* FIXME: fill in port multiplier number */
1839
1840 hdr->flags = cpu_to_le32(flags);
1841
1842	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1843 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
1844 task->ata_task.fis.sector_count |= hdr->tags << 3;
1845 else
1846 hdr->tags = cpu_to_le32(tag);
1847 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1848
1849 /*
1850 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1851 */
1852
1853 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1854 buf_cmd = buf_tmp = slot->buf;
1855 buf_tmp_dma = slot->buf_dma;
1856
1857 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1858
1859 buf_tmp += MVS_ATA_CMD_SZ;
1860 buf_tmp_dma += MVS_ATA_CMD_SZ;
1861#if _MV_DUMP
1862 slot->cmd_size = MVS_ATA_CMD_SZ;
1863#endif
1864
1865 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1866 /* used for STP. unused for SATA? */
1867 buf_oaf = buf_tmp;
1868 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1869
1870 buf_tmp += MVS_OAF_SZ;
1871 buf_tmp_dma += MVS_OAF_SZ;
1872
1873 /* region 3: PRD table ********************************************* */
1874 buf_prd = buf_tmp;
1875 if (tei->n_elem)
1876 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1877 else
1878 hdr->prd_tbl = 0;
1879
1880 i = sizeof(struct mvs_prd) * tei->n_elem;
1881 buf_tmp += i;
1882 buf_tmp_dma += i;
1883
1884	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) ****** */
1885 /* FIXME: probably unused, for SATA. kept here just in case
1886 * we get a STP/SATA error information record
1887 */
1888 slot->response = buf_tmp;
1889 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1890
1891 req_len = sizeof(struct host_to_dev_fis);
1892 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1893 sizeof(struct mvs_err_info) - i;
1894
1895 /* request, response lengths */
1896 resp_len = min(resp_len, max_resp_len);
1897 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1898
1899 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1900 /* fill in command FIS and ATAPI CDB */
1901 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1902 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1903 memcpy(buf_cmd + STP_ATAPI_CMD,
1904 task->ata_task.atapi_packet, 16);
1905
1906 /* generate open address frame hdr (first 12 bytes) */
1907 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1908 buf_oaf[1] = task->dev->linkrate & 0xf;
1909 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1910 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1911
1912 /* fill in PRD (scatter/gather) table, if any */
1913 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1914 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1915 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1916 buf_prd++;
1917 }
1918
1919 return 0;
1920}
1921
1922static int mvs_task_prep_ssp(struct mvs_info *mvi,
1923 struct mvs_task_exec_info *tei)
1924{
1925 struct sas_task *task = tei->task;
1926 struct mvs_cmd_hdr *hdr = tei->hdr;
1927 struct mvs_port *port = tei->port;
1928 struct mvs_slot_info *slot;
1929 struct scatterlist *sg;
1930 struct mvs_prd *buf_prd;
1931 struct ssp_frame_hdr *ssp_hdr;
1932 void *buf_tmp;
1933 u8 *buf_cmd, *buf_oaf, fburst = 0;
1934 dma_addr_t buf_tmp_dma;
1935 u32 flags;
1936 u32 resp_len, req_len, i, tag = tei->tag;
1937 const u32 max_resp_len = SB_RFB_MAX;
1938 u8 phy_mask;
1939
1940 slot = &mvi->slot_info[tag];
1941
1942 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1943 task->dev->port->phy_mask;
1944 slot->tx = mvi->tx_prod;
1945 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1946 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1947 (phy_mask << TXQ_PHY_SHIFT));
1948
1949 flags = MCH_RETRY;
1950 if (task->ssp_task.enable_first_burst) {
1951 flags |= MCH_FBURST;
1952 fburst = (1 << 7);
1953 }
1954 hdr->flags = cpu_to_le32(flags |
1955 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1956 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1957
1958 hdr->tags = cpu_to_le32(tag);
1959 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1960
1961 /*
1962 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1963 */
1964
1965 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1966 buf_cmd = buf_tmp = slot->buf;
1967 buf_tmp_dma = slot->buf_dma;
1968
1969 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1970
1971 buf_tmp += MVS_SSP_CMD_SZ;
1972 buf_tmp_dma += MVS_SSP_CMD_SZ;
1973#if _MV_DUMP
1974 slot->cmd_size = MVS_SSP_CMD_SZ;
1975#endif
1976
1977 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1978 buf_oaf = buf_tmp;
1979 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1980
1981 buf_tmp += MVS_OAF_SZ;
1982 buf_tmp_dma += MVS_OAF_SZ;
1983
1984 /* region 3: PRD table ********************************************* */
1985 buf_prd = buf_tmp;
1986 if (tei->n_elem)
1987 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1988 else
1989 hdr->prd_tbl = 0;
1990
1991 i = sizeof(struct mvs_prd) * tei->n_elem;
1992 buf_tmp += i;
1993 buf_tmp_dma += i;
1994
1995	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) ****** */
1996 slot->response = buf_tmp;
1997 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1998
1999 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
2000 sizeof(struct mvs_err_info) - i;
2001 resp_len = min(resp_len, max_resp_len);
2002
2003 req_len = sizeof(struct ssp_frame_hdr) + 28;
2004
2005 /* request, response lengths */
2006 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
2007
2008 /* generate open address frame hdr (first 12 bytes) */
2009 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
2010 buf_oaf[1] = task->dev->linkrate & 0xf;
2011 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
2012 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
2013
2014 /* fill in SSP frame header (Command Table.SSP frame header) */
2015 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
2016 ssp_hdr->frame_type = SSP_COMMAND;
2017 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
2018 HASHED_SAS_ADDR_SIZE);
2019 memcpy(ssp_hdr->hashed_src_addr,
2020 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
2021 ssp_hdr->tag = cpu_to_be16(tag);
2022
2023 /* fill in command frame IU */
2024 buf_cmd += sizeof(*ssp_hdr);
2025 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
2026 buf_cmd[9] = fburst | task->ssp_task.task_attr |
2027 (task->ssp_task.task_prio << 3);
2028 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
2029
2030 /* fill in PRD (scatter/gather) table, if any */
2031 for_each_sg(task->scatter, sg, tei->n_elem, i) {
2032 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
2033 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
2034 buf_prd++;
2035 }
2036
2037 return 0;
2038}
2039
2040static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
2041{
2042 struct domain_device *dev = task->dev;
2043 struct mvs_info *mvi = dev->port->ha->lldd_ha;
2044 struct pci_dev *pdev = mvi->pdev;
2045 void __iomem *regs = mvi->regs;
2046 struct mvs_task_exec_info tei;
2047 struct sas_task *t = task;
2048 struct mvs_slot_info *slot;
2049 u32 tag = 0xdeadbeef, rc, n_elem = 0;
2050 unsigned long flags;
2051 u32 n = num, pass = 0;
2052
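	/* prepare one command slot per task (SMP/SSP/STP), chain it on the
	 * delivery ring, and kick the TX producer index once after the loop
	 */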
2053 spin_lock_irqsave(&mvi->lock, flags);
2054 do {
2055 dev = t->dev;
2056 tei.port = &mvi->port[dev->port->id];
2057
2058 if (!tei.port->port_attached) {
2059 if (sas_protocol_ata(t->task_proto)) {
2060 rc = SAS_PHY_DOWN;
2061 goto out_done;
2062 } else {
2063 struct task_status_struct *ts = &t->task_status;
2064 ts->resp = SAS_TASK_UNDELIVERED;
2065 ts->stat = SAS_PHY_DOWN;
2066 t->task_done(t);
2067 if (n > 1)
2068 t = list_entry(t->list.next,
2069 struct sas_task, list);
2070 continue;
2071 }
2072 }
2073
2074 if (!sas_protocol_ata(t->task_proto)) {
2075 if (t->num_scatter) {
2076 n_elem = pci_map_sg(mvi->pdev, t->scatter,
2077 t->num_scatter,
2078 t->data_dir);
2079 if (!n_elem) {
2080 rc = -ENOMEM;
2081 goto err_out;
2082 }
2083 }
2084 } else {
2085 n_elem = t->num_scatter;
2086 }
2087
2088 rc = mvs_tag_alloc(mvi, &tag);
2089 if (rc)
2090 goto err_out;
2091
2092 slot = &mvi->slot_info[tag];
2093 t->lldd_task = NULL;
2094 slot->n_elem = n_elem;
2095 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2096 tei.task = t;
2097 tei.hdr = &mvi->slot[tag];
2098 tei.tag = tag;
2099 tei.n_elem = n_elem;
2100
2101 switch (t->task_proto) {
2102 case SAS_PROTOCOL_SMP:
2103 rc = mvs_task_prep_smp(mvi, &tei);
2104 break;
2105 case SAS_PROTOCOL_SSP:
2106 rc = mvs_task_prep_ssp(mvi, &tei);
2107 break;
2108 case SAS_PROTOCOL_SATA:
2109 case SAS_PROTOCOL_STP:
2110 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2111 rc = mvs_task_prep_ata(mvi, &tei);
2112 break;
2113 default:
2114 dev_printk(KERN_ERR, &pdev->dev,
2115 "unknown sas_task proto: 0x%x\n",
2116 t->task_proto);
2117 rc = -EINVAL;
2118 break;
2119 }
2120
2121 if (rc)
2122 goto err_out_tag;
2123
2124 slot->task = t;
2125 slot->port = tei.port;
2126 t->lldd_task = (void *) slot;
2127 list_add_tail(&slot->list, &slot->port->list);
2128 /* TODO: select normal or high priority */
2129
2130 spin_lock(&t->task_state_lock);
2131 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
2132 spin_unlock(&t->task_state_lock);
2133
2134 mvs_hba_memory_dump(mvi, tag, t->task_proto);
2135
2136 ++pass;
2137 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
2138 if (n > 1)
2139 t = list_entry(t->list.next, struct sas_task, list);
2140 } while (--n);
2141
2142 rc = 0;
2143 goto out_done;
2144
2145err_out_tag:
2146 mvs_tag_free(mvi, tag);
2147err_out:
2148 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
2149 if (!sas_protocol_ata(t->task_proto))
2150 if (n_elem)
2151 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
2152 t->data_dir);
2153out_done:
2154 if (pass)
2155 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
2156 spin_unlock_irqrestore(&mvi->lock, flags);
2157 return rc;
2158}
2159
2160static int mvs_task_abort(struct sas_task *task)
2161{
2162 int rc;
2163 unsigned long flags;
2164 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
2165 struct pci_dev *pdev = mvi->pdev;
2166 int tag;
2167
2168 spin_lock_irqsave(&task->task_state_lock, flags);
2169 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
2170 rc = TMF_RESP_FUNC_COMPLETE;
2171 spin_unlock_irqrestore(&task->task_state_lock, flags);
2172 goto out_done;
2173 }
2174 spin_unlock_irqrestore(&task->task_state_lock, flags);
2175
2176 switch (task->task_proto) {
2177 case SAS_PROTOCOL_SMP:
2178 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
2179 break;
2180 case SAS_PROTOCOL_SSP:
2181 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
2182 break;
2183 case SAS_PROTOCOL_SATA:
2184 case SAS_PROTOCOL_STP:
2185 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
2186 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
2187#if _MV_DUMP
2188 dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
2189 mvs_hexdump(sizeof(struct host_to_dev_fis),
2190 (void *)&task->ata_task.fis, 0);
2191 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
2192 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
2193#endif
2194 spin_lock_irqsave(&task->task_state_lock, flags);
2195 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
2196 /* TODO */
2197 ;
2198 }
2199 spin_unlock_irqrestore(&task->task_state_lock, flags);
2200 break;
2201 }
2202 default:
2203 break;
2204 }
2205
2206 if (mvs_find_tag(mvi, task, &tag)) {
2207 spin_lock_irqsave(&mvi->lock, flags);
2208 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
2209 spin_unlock_irqrestore(&mvi->lock, flags);
2210 }
2211 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
2212 rc = TMF_RESP_FUNC_COMPLETE;
2213 else
2214 rc = TMF_RESP_FUNC_FAILED;
2215out_done:
2216 return rc;
2217}
2218
2219static void mvs_free(struct mvs_info *mvi)
2220{
2221 int i;
2222
2223 if (!mvi)
2224 return;
2225
2226 for (i = 0; i < MVS_SLOTS; i++) {
2227 struct mvs_slot_info *slot = &mvi->slot_info[i];
2228
2229 if (slot->buf)
2230 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
2231 slot->buf, slot->buf_dma);
2232 }
2233
2234 if (mvi->tx)
2235 dma_free_coherent(&mvi->pdev->dev,
2236 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2237 mvi->tx, mvi->tx_dma);
2238 if (mvi->rx_fis)
2239 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2240 mvi->rx_fis, mvi->rx_fis_dma);
2241 if (mvi->rx)
2242 dma_free_coherent(&mvi->pdev->dev,
2243 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2244 mvi->rx, mvi->rx_dma);
2245 if (mvi->slot)
2246 dma_free_coherent(&mvi->pdev->dev,
2247 sizeof(*mvi->slot) * MVS_SLOTS,
2248 mvi->slot, mvi->slot_dma);
2249#ifdef MVS_ENABLE_PERI
2250 if (mvi->peri_regs)
2251 iounmap(mvi->peri_regs);
2252#endif
2253 if (mvi->regs)
2254 iounmap(mvi->regs);
2255 if (mvi->shost)
2256 scsi_host_put(mvi->shost);
2257 kfree(mvi->sas.sas_port);
2258 kfree(mvi->sas.sas_phy);
2259 kfree(mvi);
2260}
2261
2262/* FIXME: locking? */
2263static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2264 void *funcdata)
2265{
2266 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2267 int rc = 0, phy_id = sas_phy->id;
2268 u32 tmp;
2269
2270 tmp = mvs_read_phy_ctl(mvi, phy_id);
2271
2272 switch (func) {
2273 case PHY_FUNC_SET_LINK_RATE:{
2274 struct sas_phy_linkrates *rates = funcdata;
2275 u32 lrmin = 0, lrmax = 0;
2276
2277 lrmin = (rates->minimum_linkrate << 8);
2278 lrmax = (rates->maximum_linkrate << 12);
2279
2280 if (lrmin) {
2281 tmp &= ~(0xf << 8);
2282 tmp |= lrmin;
2283 }
2284 if (lrmax) {
2285 tmp &= ~(0xf << 12);
2286 tmp |= lrmax;
2287 }
2288 mvs_write_phy_ctl(mvi, phy_id, tmp);
2289 break;
2290 }
2291
2292 case PHY_FUNC_HARD_RESET:
2293 if (tmp & PHY_RST_HARD)
2294 break;
2295 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2296 break;
2297
2298 case PHY_FUNC_LINK_RESET:
2299 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2300 break;
2301
2302 case PHY_FUNC_DISABLE:
2303 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2304 default:
2305 rc = -EOPNOTSUPP;
2306 }
2307
2308 return rc;
2309}
2310
2311static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2312{
2313 struct mvs_phy *phy = &mvi->phy[phy_id];
2314 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2315
2316 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2317 sas_phy->class = SAS;
2318 sas_phy->iproto = SAS_PROTOCOL_ALL;
2319 sas_phy->tproto = 0;
2320 sas_phy->type = PHY_TYPE_PHYSICAL;
2321 sas_phy->role = PHY_ROLE_INITIATOR;
2322 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2323 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2324
2325 sas_phy->id = phy_id;
2326 sas_phy->sas_addr = &mvi->sas_addr[0];
2327 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2328 sas_phy->ha = &mvi->sas;
2329 sas_phy->lldd_phy = phy;
2330}
2331
2332static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2333 const struct pci_device_id *ent)
2334{
2335 struct mvs_info *mvi;
2336 unsigned long res_start, res_len, res_flag;
2337 struct asd_sas_phy **arr_phy;
2338 struct asd_sas_port **arr_port;
2339 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2340 int i;
2341
2342 /*
2343 * alloc and init our per-HBA mvs_info struct
2344 */
2345
2346 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2347 if (!mvi)
2348 return NULL;
2349
2350 spin_lock_init(&mvi->lock);
2351#ifdef MVS_USE_TASKLET
2352 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
2353#endif
2354 mvi->pdev = pdev;
2355 mvi->chip = chip;
2356
2357 if (pdev->device == 0x6440 && pdev->revision == 0)
2358 mvi->flags |= MVF_PHY_PWR_FIX;
2359
2360 /*
2361 * alloc and init SCSI, SAS glue
2362 */
2363
2364 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2365 if (!mvi->shost)
2366 goto err_out;
2367
2368 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2369 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2370 if (!arr_phy || !arr_port)
2371 goto err_out;
2372
2373 for (i = 0; i < MVS_MAX_PHYS; i++) {
2374 mvs_phy_init(mvi, i);
2375 arr_phy[i] = &mvi->phy[i].sas_phy;
2376 arr_port[i] = &mvi->port[i].sas_port;
2377 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
2378 mvi->port[i].wide_port_phymap = 0;
2379 mvi->port[i].port_attached = 0;
2380 INIT_LIST_HEAD(&mvi->port[i].list);
2381 }
2382
2383 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2384 mvi->shost->transportt = mvs_stt;
2385 mvi->shost->max_id = 21;
2386 mvi->shost->max_lun = ~0;
2387 mvi->shost->max_channel = 0;
2388 mvi->shost->max_cmd_len = 16;
2389
2390 mvi->sas.sas_ha_name = DRV_NAME;
2391 mvi->sas.dev = &pdev->dev;
2392 mvi->sas.lldd_module = THIS_MODULE;
2393 mvi->sas.sas_addr = &mvi->sas_addr[0];
2394 mvi->sas.sas_phy = arr_phy;
2395 mvi->sas.sas_port = arr_port;
2396 mvi->sas.num_phys = chip->n_phy;
2397 mvi->sas.lldd_max_execute_num = 1;
2398 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2399 mvi->shost->can_queue = MVS_CAN_QUEUE;
2400 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
2401 mvi->sas.lldd_ha = mvi;
2402 mvi->sas.core.shost = mvi->shost;
2403
2404 mvs_tag_init(mvi);
2405
2406 /*
2407 * ioremap main and peripheral registers
2408 */
2409
2410#ifdef MVS_ENABLE_PERI
2411 res_start = pci_resource_start(pdev, 2);
2412 res_len = pci_resource_len(pdev, 2);
2413 if (!res_start || !res_len)
2414 goto err_out;
2415
2416 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2417 if (!mvi->peri_regs)
2418 goto err_out;
2419#endif
2420
2421 res_start = pci_resource_start(pdev, 4);
2422 res_len = pci_resource_len(pdev, 4);
2423 if (!res_start || !res_len)
2424 goto err_out;
2425
2426 res_flag = pci_resource_flags(pdev, 4);
2427 if (res_flag & IORESOURCE_CACHEABLE)
2428 mvi->regs = ioremap(res_start, res_len);
2429 else
2430 mvi->regs = ioremap_nocache(res_start, res_len);
2431
2432 if (!mvi->regs)
2433 goto err_out;
2434
2435 /*
2436 * alloc and init our DMA areas
2437 */
2438
2439 mvi->tx = dma_alloc_coherent(&pdev->dev,
2440 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2441 &mvi->tx_dma, GFP_KERNEL);
2442 if (!mvi->tx)
2443 goto err_out;
2444 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2445
2446 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2447 &mvi->rx_fis_dma, GFP_KERNEL);
2448 if (!mvi->rx_fis)
2449 goto err_out;
2450 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2451
2452 mvi->rx = dma_alloc_coherent(&pdev->dev,
2453 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2454 &mvi->rx_dma, GFP_KERNEL);
2455 if (!mvi->rx)
2456 goto err_out;
2457 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
2458
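	/* rx[0] holds the hardware-updated producer index; start it and the
	 * cached consumer index at an out-of-range value (0xfff) so the first
	 * completion interrupt does not mistake stale entries for new ones
	 */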
2459 mvi->rx[0] = cpu_to_le32(0xfff);
2460 mvi->rx_cons = 0xfff;
2461
2462 mvi->slot = dma_alloc_coherent(&pdev->dev,
2463 sizeof(*mvi->slot) * MVS_SLOTS,
2464 &mvi->slot_dma, GFP_KERNEL);
2465 if (!mvi->slot)
2466 goto err_out;
2467 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2468
2469 for (i = 0; i < MVS_SLOTS; i++) {
2470 struct mvs_slot_info *slot = &mvi->slot_info[i];
2471
2472 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2473 &slot->buf_dma, GFP_KERNEL);
2474 if (!slot->buf)
2475 goto err_out;
2476 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2477 }
2478
2479 /* finally, read NVRAM to get our SAS address */
2480 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2481 goto err_out;
2482 return mvi;
2483
2484err_out:
2485 mvs_free(mvi);
2486 return NULL;
2487}
2488
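/* Indirect access to the chip's internal command registers: latch the
 * register address through CMD_ADDR, then move the value through CMD_DATA.
 */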
2489static u32 mvs_cr32(void __iomem *regs, u32 addr)
2490{
2491 mw32(CMD_ADDR, addr);
2492 return mr32(CMD_DATA);
2493}
2494
2495static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2496{
2497 mw32(CMD_ADDR, addr);
2498 mw32(CMD_DATA, val);
2499}
2500
2501static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2502{
2503 void __iomem *regs = mvi->regs;
2504	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
2505			    mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2506}
2507
2508static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2509{
2510 void __iomem *regs = mvi->regs;
2511 if (port < 4)
2512 mw32(P0_SER_CTLSTAT + port * 4, val);
2513 else
2514 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2515}
2516
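/* Per-phy port registers live in two banks: phys 0-3 relative to 'off' and
 * phys 4-7 relative to 'off2', with an 8-byte stride per phy in each bank.
 */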
2517static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2518{
2519 void __iomem *regs = mvi->regs + off;
2520 void __iomem *regs2 = mvi->regs + off2;
2521	return (port < 4) ? readl(regs + port * 8) :
2522			    readl(regs2 + (port - 4) * 8);
2523}
2524
2525static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2526 u32 port, u32 val)
2527{
2528 void __iomem *regs = mvi->regs + off;
2529 void __iomem *regs2 = mvi->regs + off2;
2530 if (port < 4)
2531 writel(val, regs + port * 8);
2532 else
2533 writel(val, regs2 + (port - 4) * 8);
2534}
2535
2536static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2537{
2538 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2539}
2540
2541static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2542{
2543 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2544}
2545
2546static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2547{
2548 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2549}
2550
2551static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2552{
2553 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2554}
2555
2556static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2557{
2558 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2559}
2560
2561static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2562{
2563 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2564}
2565
2566static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2567{
2568 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2569}
2570
2571static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2572{
2573 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2574}
2575
2576static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2577{
2578 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2579}
2580
2581static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2582{
2583 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2584}
2585
2586static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2587{
2588 void __iomem *regs = mvi->regs;
2589 u32 tmp;
2590
2591 /* workaround for SATA R-ERR, to ignore phy glitch */
2592 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2593 tmp &= ~(1 << 9);
2594 tmp |= (1 << 10);
2595 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2596
2597 /* enable retry 127 times */
2598 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2599
2600 /* extend open frame timeout to max */
2601 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2602 tmp &= ~0xffff;
2603 tmp |= 0x3fff;
2604 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2605
2606	/* workaround for WDTIMEOUT, set to 550 ms */
2607 mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
2608
2609 /* not to halt for different port op during wideport link change */
2610 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2611
2612	/* workaround for Seagate disk not-found OOB sequence: receive
2613	 * COMINIT before sending out COMWAKE */
2614 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2615 tmp &= 0x0000ffff;
2616 tmp |= 0x00fa0000;
2617 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2618
2619 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2620 tmp &= 0x1fffffff;
2621 tmp |= (2U << 29); /* 8 ms retry */
2622 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2623
2624 /* TEST - for phy decoding error, adjust voltage levels */
2625 mw32(P0_VSR_ADDR + 0, 0x8);
2626 mw32(P0_VSR_DATA + 0, 0x2F0);
2627
2628 mw32(P0_VSR_ADDR + 8, 0x8);
2629 mw32(P0_VSR_DATA + 8, 0x2F0);
2630
2631 mw32(P0_VSR_ADDR + 16, 0x8);
2632 mw32(P0_VSR_DATA + 16, 0x2F0);
2633
2634 mw32(P0_VSR_ADDR + 24, 0x8);
2635 mw32(P0_VSR_DATA + 24, 0x2F0);
2636
2637}
2638
2639static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2640{
2641 void __iomem *regs = mvi->regs;
2642 u32 tmp;
2643
2644 tmp = mr32(PCS);
2645 if (mvi->chip->n_phy <= 4)
2646 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2647 else
2648 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2649 mw32(PCS, tmp);
2650}
2651
2652static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2653{
2654 void __iomem *regs = mvi->regs;
2655 u32 reg;
2656 struct mvs_phy *phy = &mvi->phy[i];
2657
2658 /* TODO check & save device type */
2659 reg = mr32(GBL_PORT_TYPE);
2660
2661 if (reg & MODE_SAS_SATA & (1 << i))
2662 phy->phy_type |= PORT_TYPE_SAS;
2663 else
2664 phy->phy_type |= PORT_TYPE_SATA;
2665}
2666
2667static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2668{
2669 u32 *s = (u32 *) buf;
2670
2671 if (!s)
2672 return NULL;
2673
2674 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2675 s[3] = mvs_read_port_cfg_data(mvi, i);
2676
2677 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2678 s[2] = mvs_read_port_cfg_data(mvi, i);
2679
2680 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2681 s[1] = mvs_read_port_cfg_data(mvi, i);
2682
2683 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2684 s[0] = mvs_read_port_cfg_data(mvi, i);
2685
2686 return (void *)s;
2687}
2688
2689static u32 mvs_is_sig_fis_received(u32 irq_status)
2690{
2691 return irq_status & PHYEV_SIG_FIS;
2692}
2693
2694static void mvs_update_wideport(struct mvs_info *mvi, int i)
2695{
2696 struct mvs_phy *phy = &mvi->phy[i];
2697 struct mvs_port *port = phy->port;
2698 int j, no;
2699
2700 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2701 if (no & 1) {
2702 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2703 mvs_write_port_cfg_data(mvi, no,
2704 port->wide_port_phymap);
2705 } else {
2706 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2707 mvs_write_port_cfg_data(mvi, no, 0);
2708 }
2709}
2710
2711static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2712{
2713 u32 tmp;
2714 struct mvs_phy *phy = &mvi->phy[i];
2715	struct mvs_port *port = phy->port;
2716
2717 tmp = mvs_read_phy_ctl(mvi, i);
2718
2719 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2720 if (!port)
2721 phy->phy_attached = 1;
2722 return tmp;
2723 }
2724
2725 if (port) {
2726 if (phy->phy_type & PORT_TYPE_SAS) {
2727 port->wide_port_phymap &= ~(1U << i);
2728 if (!port->wide_port_phymap)
2729 port->port_attached = 0;
2730 mvs_update_wideport(mvi, i);
2731 } else if (phy->phy_type & PORT_TYPE_SATA)
2732 port->port_attached = 0;
2733 mvs_free_reg_set(mvi, phy->port);
2734 phy->port = NULL;
2735 phy->phy_attached = 0;
2736 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2737 }
2738 return 0;
2739}
2740
2741static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2742 int get_st)
2743{
2744 struct mvs_phy *phy = &mvi->phy[i];
2745 struct pci_dev *pdev = mvi->pdev;
2746 u32 tmp;
2747 u64 tmp64;
2748
2749 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2750 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2751
2752 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2753 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2754
2755 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2756 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2757
2758 if (get_st) {
2759 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2760 phy->phy_status = mvs_is_phy_ready(mvi, i);
2761 }
2762
2763 if (phy->phy_status) {
2764 u32 phy_st;
2765 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2766
2767 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2768 phy_st = mvs_read_port_cfg_data(mvi, i);
2769
2770 sas_phy->linkrate =
2771 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2772 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2773 phy->minimum_linkrate =
2774 (phy->phy_status &
2775 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
2776 phy->maximum_linkrate =
2777 (phy->phy_status &
2778 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
2779
2780 if (phy->phy_type & PORT_TYPE_SAS) {
2781			/* Update attached_sas_addr */
2782 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2783 phy->att_dev_sas_addr =
2784 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2785 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2786 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2787 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2788 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2789 phy->identify.device_type =
2790 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2791
2792 if (phy->identify.device_type == SAS_END_DEV)
2793 phy->identify.target_port_protocols =
2794 SAS_PROTOCOL_SSP;
2795 else if (phy->identify.device_type != NO_DEVICE)
2796 phy->identify.target_port_protocols =
2797 SAS_PROTOCOL_SMP;
2798 if (phy_st & PHY_OOB_DTCTD)
2799 sas_phy->oob_mode = SAS_OOB_MODE;
2800 phy->frame_rcvd_size =
2801 sizeof(struct sas_identify_frame);
2802 } else if (phy->phy_type & PORT_TYPE_SATA) {
2803 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2804 if (mvs_is_sig_fis_received(phy->irq_status)) {
2805 phy->att_dev_sas_addr = i; /* temp */
2806 if (phy_st & PHY_OOB_DTCTD)
2807 sas_phy->oob_mode = SATA_OOB_MODE;
2808 phy->frame_rcvd_size =
2809 sizeof(struct dev_to_host_fis);
2810 mvs_get_d2h_reg(mvi, i,
2811 (void *)sas_phy->frame_rcvd);
2812 } else {
2813 dev_printk(KERN_DEBUG, &pdev->dev,
2814 "No sig fis\n");
2815 phy->phy_type &= ~(PORT_TYPE_SATA);
2816 goto out_done;
2817 }
2818 }
2819 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2820 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2821
2822 dev_printk(KERN_DEBUG, &pdev->dev,
2823 "phy[%d] Get Attached Address 0x%llX ,"
2824 " SAS Address 0x%llX\n",
2825 i,
2826 (unsigned long long)phy->att_dev_sas_addr,
2827 (unsigned long long)phy->dev_sas_addr);
2828 dev_printk(KERN_DEBUG, &pdev->dev,
2829 "Rate = %x , type = %d\n",
2830 sas_phy->linkrate, phy->phy_type);
2831
2832 /* workaround for HW phy decoding error on 1.5g disk drive */
2833 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2834 tmp = mvs_read_port_vsr_data(mvi, i);
2835 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2836 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2837 SAS_LINK_RATE_1_5_GBPS)
2838 tmp &= ~PHY_MODE6_LATECLK;
2839 else
2840 tmp |= PHY_MODE6_LATECLK;
2841 mvs_write_port_vsr_data(mvi, i, tmp);
2842
2843 }
2844out_done:
2845 if (get_st)
2846 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2847}
2848
2849static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2850{
2851 struct sas_ha_struct *sas_ha = sas_phy->ha;
2852 struct mvs_info *mvi = sas_ha->lldd_ha;
2853 struct asd_sas_port *sas_port = sas_phy->port;
2854 struct mvs_phy *phy = sas_phy->lldd_phy;
2855 struct mvs_port *port = &mvi->port[sas_port->id];
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&mvi->lock, flags);
2859 port->port_attached = 1;
2860 phy->port = port;
2861 port->taskfileset = MVS_ID_NOT_MAPPED;
2862 if (phy->phy_type & PORT_TYPE_SAS) {
2863 port->wide_port_phymap = sas_port->phy_mask;
2864 mvs_update_wideport(mvi, sas_phy->id);
2865 }
2866 spin_unlock_irqrestore(&mvi->lock, flags);
2867}
2868
2869static int mvs_I_T_nexus_reset(struct domain_device *dev)
2870{
2871 return TMF_RESP_FUNC_FAILED;
2872}
2873
2874static int __devinit mvs_hw_init(struct mvs_info *mvi)
2875{
2876 void __iomem *regs = mvi->regs;
2877 int i;
2878 u32 tmp, cctl;
2879
2880 /* make sure interrupts are masked immediately (paranoia) */
2881 mw32(GBL_CTL, 0);
2882 tmp = mr32(GBL_CTL);
2883
2884 /* Reset Controller */
2885 if (!(tmp & HBA_RST)) {
2886 if (mvi->flags & MVF_PHY_PWR_FIX) {
2887 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2888 tmp &= ~PCTL_PWR_ON;
2889 tmp |= PCTL_OFF;
2890 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2891
2892 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2893 tmp &= ~PCTL_PWR_ON;
2894 tmp |= PCTL_OFF;
2895 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2896 }
2897
2898 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2899 mw32_f(GBL_CTL, HBA_RST);
2900 }
2901
2902 /* wait for reset to finish; timeout is just a guess */
2903 i = 1000;
2904 while (i-- > 0) {
2905 msleep(10);
2906
2907 if (!(mr32(GBL_CTL) & HBA_RST))
2908 break;
2909 }
2910 if (mr32(GBL_CTL) & HBA_RST) {
2911 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2912 return -EBUSY;
2913 }
2914
2915 /* Init Chip */
2916 /* make sure RST is set; HBA_RST /should/ have done that for us */
2917 cctl = mr32(CTL);
2918 if (cctl & CCTL_RST)
2919 cctl &= ~CCTL_RST;
2920 else
2921 mw32_f(CTL, cctl | CCTL_RST);
2922
2923 /* write to device control _AND_ device status register? - A.C. */
2924 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2925 tmp &= ~PRD_REQ_MASK;
2926 tmp |= PRD_REQ_SIZE;
2927 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2928
2929 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2930 tmp |= PCTL_PWR_ON;
2931 tmp &= ~PCTL_OFF;
2932 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2933
2934 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2935 tmp |= PCTL_PWR_ON;
2936 tmp &= ~PCTL_OFF;
2937 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2938
2939 mw32_f(CTL, cctl);
2940
2941 /* reset control */
2942 mw32(PCS, 0); /*MVS_PCS */
2943
2944 mvs_phy_hacks(mvi);
2945
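	/* program the ring base addresses; the double 16-bit shift yields the
	 * upper 32 bits of the DMA address without a shift-count warning when
	 * dma_addr_t is only 32 bits wide
	 */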
2946 mw32(CMD_LIST_LO, mvi->slot_dma);
2947 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2948
2949 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2950 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2951
2952 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2953 mw32(TX_LO, mvi->tx_dma);
2954 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2955
2956 mw32(RX_CFG, MVS_RX_RING_SZ);
2957 mw32(RX_LO, mvi->rx_dma);
2958 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2959
2960 /* enable auto port detection */
2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2962 msleep(1100);
2963 /* init and reset phys */
2964 for (i = 0; i < mvi->chip->n_phy; i++) {
2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2966 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2967
2968 mvs_detect_porttype(mvi, i);
2969
2970 /* set phy local SAS address */
2971 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2972 mvs_write_port_cfg_data(mvi, i, lo);
2973 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2974 mvs_write_port_cfg_data(mvi, i, hi);
2975
2976 /* reset phy */
2977 tmp = mvs_read_phy_ctl(mvi, i);
2978 tmp |= PHY_RST;
2979 mvs_write_phy_ctl(mvi, i, tmp);
2980 }
2981
2982 msleep(100);
2983
2984 for (i = 0; i < mvi->chip->n_phy; i++) {
2985 /* clear phy int status */
2986 tmp = mvs_read_port_irq_stat(mvi, i);
2987 tmp &= ~PHYEV_SIG_FIS;
2988 mvs_write_port_irq_stat(mvi, i, tmp);
2989
2990 /* set phy int mask */
2991 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2992 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2993 mvs_write_port_irq_mask(mvi, i, tmp);
2994
2995 msleep(100);
2996 mvs_update_phyinfo(mvi, i, 1);
2997 mvs_enable_xmt(mvi, i);
2998 }
2999
3000 /* FIXME: update wide port bitmaps */
3001
3002 /* little endian for open address and command table, etc. */
3003	/* A.C.
3004	 * it seems that (from the spec) turning on big-endian won't
3005	 * do us any good on big-endian machines; needs further confirmation
3006	 */
3007 cctl = mr32(CTL);
3008 cctl |= CCTL_ENDIAN_CMD;
3009 cctl |= CCTL_ENDIAN_DATA;
3010 cctl &= ~CCTL_ENDIAN_OPEN;
3011 cctl |= CCTL_ENDIAN_RSP;
3012 mw32_f(CTL, cctl);
3013
3014 /* reset CMD queue */
3015 tmp = mr32(PCS);
3016 tmp |= PCS_CMD_RST;
3017 mw32(PCS, tmp);
3018	/* interrupt coalescing may cause a missed HW interrupt in some cases:
3019	 * the max coalescing count is 0x1ff, while our max slot count is 0x200,
3020	 * so programming the full slot count would truncate the count to 0.
3021	 */
3022 tmp = 0;
3023 mw32(INT_COAL, tmp);
3024
3025 tmp = 0x100;
3026 mw32(INT_COAL_TMOUT, tmp);
3027
3028 /* ladies and gentlemen, start your engines */
3029 mw32(TX_CFG, 0);
3030 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
3031 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
3032 /* enable CMD/CMPL_Q/RESP mode */
3033 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
3034
3035 /* enable completion queue interrupt */
3036 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
3037 mw32(INT_MASK, tmp);
3038
3039 /* Enable SRS interrupt */
3040 mw32(INT_MASK_SRS, 0xFF);
3041 return 0;
3042}
3043
3044static void __devinit mvs_print_info(struct mvs_info *mvi)
3045{
3046 struct pci_dev *pdev = mvi->pdev;
3047 static int printed_version;
3048
3049 if (!printed_version++)
3050 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3051
3052 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
3054}
3055
3056static int __devinit mvs_pci_init(struct pci_dev *pdev,
3057 const struct pci_device_id *ent)
3058{
3059 int rc;
3060 struct mvs_info *mvi;
3061 irq_handler_t irq_handler = mvs_interrupt;
3062
3063 rc = pci_enable_device(pdev);
3064 if (rc)
3065 return rc;
3066
3067 pci_set_master(pdev);
3068
3069 rc = pci_request_regions(pdev, DRV_NAME);
3070 if (rc)
3071 goto err_out_disable;
3072
3073 rc = pci_go_64(pdev);
3074 if (rc)
3075 goto err_out_regions;
3076
3077 mvi = mvs_alloc(pdev, ent);
3078 if (!mvi) {
3079 rc = -ENOMEM;
3080 goto err_out_regions;
3081 }
3082
3083 rc = mvs_hw_init(mvi);
3084 if (rc)
3085 goto err_out_mvi;
3086
3087#ifndef MVS_DISABLE_MSI
3088 if (!pci_enable_msi(pdev)) {
3089 u32 tmp;
3090 void __iomem *regs = mvi->regs;
3091 mvi->flags |= MVF_MSI;
3092 irq_handler = mvs_msi_interrupt;
3093 tmp = mr32(PCS);
3094 mw32(PCS, tmp | PCS_SELF_CLEAR);
3095 }
3096#endif
3097
3098 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
3099 if (rc)
3100 goto err_out_msi;
3101
3102 rc = scsi_add_host(mvi->shost, &pdev->dev);
3103 if (rc)
3104 goto err_out_irq;
3105
3106 rc = sas_register_ha(&mvi->sas);
3107 if (rc)
3108 goto err_out_shost;
3109
3110 pci_set_drvdata(pdev, mvi);
3111
3112 mvs_print_info(mvi);
3113
3114 mvs_hba_interrupt_enable(mvi);
3115
3116 scsi_scan_host(mvi->shost);
3117
3118 return 0;
3119
3120err_out_shost:
3121 scsi_remove_host(mvi->shost);
3122err_out_irq:
3123 free_irq(pdev->irq, mvi);
3124err_out_msi:
3125	if (mvi->flags & MVF_MSI)
3126 pci_disable_msi(pdev);
3127err_out_mvi:
3128 mvs_free(mvi);
3129err_out_regions:
3130 pci_release_regions(pdev);
3131err_out_disable:
3132 pci_disable_device(pdev);
3133 return rc;
3134}
3135
3136static void __devexit mvs_pci_remove(struct pci_dev *pdev)
3137{
3138 struct mvs_info *mvi = pci_get_drvdata(pdev);
3139
3140 pci_set_drvdata(pdev, NULL);
3141
3142 if (mvi) {
3143 sas_unregister_ha(&mvi->sas);
3144 mvs_hba_interrupt_disable(mvi);
3145 sas_remove_host(mvi->shost);
3146 scsi_remove_host(mvi->shost);
3147
3148 free_irq(pdev->irq, mvi);
3149 if (mvi->flags & MVF_MSI)
3150 pci_disable_msi(pdev);
3151 mvs_free(mvi);
3152 pci_release_regions(pdev);
3153 }
3154 pci_disable_device(pdev);
3155}
3156
3157static struct sas_domain_function_template mvs_transport_ops = {
3158 .lldd_execute_task = mvs_task_exec,
3159 .lldd_control_phy = mvs_phy_control,
3160 .lldd_abort_task = mvs_task_abort,
3161 .lldd_port_formed = mvs_port_formed,
3162 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
3163};
3164
3165static struct pci_device_id __devinitdata mvs_pci_table[] = {
3166 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
3167 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
3168 {
3169 .vendor = PCI_VENDOR_ID_MARVELL,
3170 .device = 0x6440,
3171 .subvendor = PCI_ANY_ID,
3172 .subdevice = 0x6480,
3173 .class = 0,
3174 .class_mask = 0,
3175 .driver_data = chip_6480,
3176 },
3177 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
3178 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
3179
3180 { } /* terminate list */
3181};
3182
3183static struct pci_driver mvs_pci_driver = {
3184 .name = DRV_NAME,
3185 .id_table = mvs_pci_table,
3186 .probe = mvs_pci_init,
3187 .remove = __devexit_p(mvs_pci_remove),
3188};
3189
3190static int __init mvs_init(void)
3191{
3192 int rc;
3193
3194 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
3195 if (!mvs_stt)
3196 return -ENOMEM;
3197
3198 rc = pci_register_driver(&mvs_pci_driver);
3199 if (rc)
3200 goto err_out;
3201
3202 return 0;
3203
3204err_out:
3205 sas_release_transport(mvs_stt);
3206 return rc;
3207}
3208
3209static void __exit mvs_exit(void)
3210{
3211 pci_unregister_driver(&mvs_pci_driver);
3212 sas_release_transport(mvs_stt);
3213}
3214
3215module_init(mvs_init);
3216module_exit(mvs_exit);
3217
3218MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
3219MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
3220MODULE_VERSION(DRV_VERSION);
3221MODULE_LICENSE("GPL");
3222MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 00000000000..6de7af27e50
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
1#
2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the 88SE64XX/88SE94XX driver.
10#
11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 select FW_LOADER
32 help
33	  This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34	  PCI-E 88SE94XX chip-based host adapters.
35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 00000000000..52ac4264677
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
29mvsas-y += mv_init.o \
30 mv_sas.o \
31 mv_64xx.o \
32 mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 00000000000..10a5077b6ae
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
1/*
2 * Marvell 88SE64xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_64xx.h"
27#include "mv_chips.h"
28
29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 void __iomem *regs = mvi->regs;
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34
35 /* TODO check & save device type */
36 reg = mr32(MVS_GBL_PORT_TYPE);
37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
38 if (reg & MODE_SAS_SATA & (1 << i))
39 phy->phy_type |= PORT_TYPE_SAS;
40 else
41 phy->phy_type |= PORT_TYPE_SATA;
42}
43
44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
45{
46 void __iomem *regs = mvi->regs;
47 u32 tmp;
48
49 tmp = mr32(MVS_PCS);
50 if (mvi->chip->n_phy <= 4)
51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
58{
59 void __iomem *regs = mvi->regs;
60
61 mvs_phy_hacks(mvi);
62
63 if (!(mvi->flags & MVF_FLAG_SOC)) {
64 /* TEST - for phy decoding error, adjust voltage levels */
65 mw32(MVS_P0_VSR_ADDR + 0, 0x8);
66 mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
67
68 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
69 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
70
71 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
72 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
73
74 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
75 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
76 } else {
77 int i;
78 /* disable auto port detection */
79 mw32(MVS_GBL_PORT_TYPE, 0);
80 for (i = 0; i < mvi->chip->n_phy; i++) {
81 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
82 mvs_write_port_vsr_data(mvi, i, 0x90000000);
83 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
84 mvs_write_port_vsr_data(mvi, i, 0x50f2);
85 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
86 mvs_write_port_vsr_data(mvi, i, 0x0e);
87 }
88 }
89}
90
91static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
92{
93 void __iomem *regs = mvi->regs;
94 u32 reg, tmp;
95
96 if (!(mvi->flags & MVF_FLAG_SOC)) {
97 if (phy_id < 4)
98 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
99 else
100 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
101
102 } else
103 reg = mr32(MVS_PHY_CTL);
104
105 tmp = reg;
106 if (phy_id < 4)
107 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
108 else
109 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
110
111 if (!(mvi->flags & MVF_FLAG_SOC)) {
112 if (phy_id < 4) {
113 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
114 mdelay(10);
115 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
116 } else {
117 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
118 mdelay(10);
119 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
120 }
121 } else {
122 mw32(MVS_PHY_CTL, tmp);
123 mdelay(10);
124 mw32(MVS_PHY_CTL, reg);
125 }
126}
127
128static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
129{
130 u32 tmp;
131 tmp = mvs_read_port_irq_stat(mvi, phy_id);
132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard)
136 tmp |= PHY_RST_HARD;
137 else
138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) {
141 do {
142 tmp = mvs_read_phy_ctl(mvi, phy_id);
143 } while (tmp & PHY_RST_HARD);
144 }
145}
146
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 int i;
152
153 /* make sure interrupts are masked immediately (paranoia) */
154 mw32(MVS_GBL_CTL, 0);
155 tmp = mr32(MVS_GBL_CTL);
156
157 /* Reset Controller */
158 if (!(tmp & HBA_RST)) {
159 if (mvi->flags & MVF_PHY_PWR_FIX) {
160 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
161 tmp &= ~PCTL_PWR_OFF;
162 tmp |= PCTL_PHY_DSBL;
163 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
164
165 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
166 tmp &= ~PCTL_PWR_OFF;
167 tmp |= PCTL_PHY_DSBL;
168 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
169 }
170 }
171
172 /* make sure interrupts are masked immediately (paranoia) */
173 mw32(MVS_GBL_CTL, 0);
174 tmp = mr32(MVS_GBL_CTL);
175
176 /* Reset Controller */
177 if (!(tmp & HBA_RST)) {
178 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
179 mw32_f(MVS_GBL_CTL, HBA_RST);
180 }
181
182 /* wait for reset to finish; timeout is just a guess */
183 i = 1000;
184 while (i-- > 0) {
185 msleep(10);
186
187 if (!(mr32(MVS_GBL_CTL) & HBA_RST))
188 break;
189 }
190 if (mr32(MVS_GBL_CTL) & HBA_RST) {
191 dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
192 return -EBUSY;
193 }
194 return 0;
195}
196
197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
198{
199 void __iomem *regs = mvi->regs;
200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
240
241static int __devinit mvs_64xx_init(struct mvs_info *mvi)
242{
243 void __iomem *regs = mvi->regs;
244 int i;
245 u32 tmp, cctl;
246
247 if (mvi->pdev && mvi->pdev->revision == 0)
248 mvi->flags |= MVF_PHY_PWR_FIX;
249 if (!(mvi->flags & MVF_FLAG_SOC)) {
250 mvs_show_pcie_usage(mvi);
251 tmp = mvs_64xx_chip_reset(mvi);
252 if (tmp)
253 return tmp;
254 } else {
255 tmp = mr32(MVS_PHY_CTL);
256 tmp &= ~PCTL_PWR_OFF;
257 tmp |= PCTL_PHY_DSBL;
258 mw32(MVS_PHY_CTL, tmp);
259 }
260
261 /* Init Chip */
262 /* make sure RST is set; HBA_RST /should/ have done that for us */
263 cctl = mr32(MVS_CTL) & 0xFFFF;
264 if (cctl & CCTL_RST)
265 cctl &= ~CCTL_RST;
266 else
267 mw32_f(MVS_CTL, cctl | CCTL_RST);
268
269 if (!(mvi->flags & MVF_FLAG_SOC)) {
270 /* write to device control _AND_ device status register */
271 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
272 tmp &= ~PRD_REQ_MASK;
273 tmp |= PRD_REQ_SIZE;
274 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
275
276 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
277 tmp &= ~PCTL_PWR_OFF;
278 tmp &= ~PCTL_PHY_DSBL;
279 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
280
281 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
282 tmp &= PCTL_PWR_OFF;
283 tmp &= ~PCTL_PHY_DSBL;
284 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
285 } else {
286 tmp = mr32(MVS_PHY_CTL);
287 tmp &= ~PCTL_PWR_OFF;
288 tmp |= PCTL_COM_ON;
289 tmp &= ~PCTL_PHY_DSBL;
290 tmp |= PCTL_LINK_RST;
291 mw32(MVS_PHY_CTL, tmp);
292 msleep(100);
293 tmp &= ~PCTL_LINK_RST;
294 mw32(MVS_PHY_CTL, tmp);
295 msleep(100);
296 }
297
298 /* reset control */
299 mw32(MVS_PCS, 0); /* MVS_PCS */
300 /* init phys */
301 mvs_64xx_phy_hacks(mvi);
302
303 /* enable auto port detection */
304 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
305
306 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
307 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
308
309 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
310 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
311
312 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
313 mw32(MVS_TX_LO, mvi->tx_dma);
314 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
315
316 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
317 mw32(MVS_RX_LO, mvi->rx_dma);
318 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
319
320 for (i = 0; i < mvi->chip->n_phy; i++) {
321 /* set phy local SAS address */
322 /* should set little endian SAS address to 64xx chip */
323 mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
324 cpu_to_be64(mvi->phy[i].dev_sas_addr));
325
326 mvs_64xx_enable_xmt(mvi, i);
327
328 mvs_64xx_phy_reset(mvi, i, 1);
329 msleep(500);
330 mvs_64xx_detect_porttype(mvi, i);
331 }
332 if (mvi->flags & MVF_FLAG_SOC) {
333 /* set select registers */
334 writel(0x0E008000, regs + 0x000);
335 writel(0x59000008, regs + 0x004);
336 writel(0x20, regs + 0x008);
337 writel(0x20, regs + 0x00c);
338 writel(0x20, regs + 0x010);
339 writel(0x20, regs + 0x014);
340 writel(0x20, regs + 0x018);
341 writel(0x20, regs + 0x01c);
342 }
343 for (i = 0; i < mvi->chip->n_phy; i++) {
344 /* clear phy int status */
345 tmp = mvs_read_port_irq_stat(mvi, i);
346 tmp &= ~PHYEV_SIG_FIS;
347 mvs_write_port_irq_stat(mvi, i, tmp);
348
349 /* set phy int mask */
350 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
351 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
352 PHYEV_DEC_ERR;
353 mvs_write_port_irq_mask(mvi, i, tmp);
354
355 msleep(100);
356 mvs_update_phyinfo(mvi, i, 1);
357 }
358
359 /* FIXME: update wide port bitmaps */
360
361 /* little endian for open address and command table, etc. */
362	/*
363	 * it seems that (from the spec) turning on big-endian won't
364	 * do us any good on big-endian machines; needs further confirmation
365	 */
366 cctl = mr32(MVS_CTL);
367 cctl |= CCTL_ENDIAN_CMD;
368 cctl |= CCTL_ENDIAN_DATA;
369 cctl &= ~CCTL_ENDIAN_OPEN;
370 cctl |= CCTL_ENDIAN_RSP;
371 mw32_f(MVS_CTL, cctl);
372
373 /* reset CMD queue */
374 tmp = mr32(MVS_PCS);
375 tmp |= PCS_CMD_RST;
376 mw32(MVS_PCS, tmp);
377	/* interrupt coalescing may cause a missed HW interrupt in some cases:
378	 * the max coalescing count is 0x1ff, while our max slot count is 0x200,
379	 * so programming the full slot count would truncate the count to 0.
380	 */
381 tmp = 0;
382 mw32(MVS_INT_COAL, tmp);
383
384 tmp = 0x100;
385 mw32(MVS_INT_COAL_TMOUT, tmp);
386
387 /* ladies and gentlemen, start your engines */
388 mw32(MVS_TX_CFG, 0);
389 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
390 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
393 PCS_CMD_EN | PCS_CMD_STOP_ERR);
394
395 /* enable completion queue interrupt */
396 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
397 CINT_DMA_PCIE);
398
399 mw32(MVS_INT_MASK, tmp);
400
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
403
404 return 0;
405}
406
407static int mvs_64xx_ioremap(struct mvs_info *mvi)
408{
409 if (!mvs_ioremap(mvi, 4, 2))
410 return 0;
411 return -1;
412}
413
414static void mvs_64xx_iounmap(struct mvs_info *mvi)
415{
416 mvs_iounmap(mvi->regs);
417 mvs_iounmap(mvi->regs_ex);
418}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
421{
422 void __iomem *regs = mvi->regs;
423 u32 tmp;
424
425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
428
429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
436}
437
438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
439{
440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
453static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
454{
455 void __iomem *regs = mvi->regs;
456
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT, CINT_DONE);
459#ifndef MVS_USE_TASKLET
460 spin_lock(&mvi->lock);
461#endif
462 mvs_int_full(mvi);
463#ifndef MVS_USE_TASKLET
464 spin_unlock(&mvi->lock);
465#endif
466 return IRQ_HANDLED;
467}
468
469static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
470{
471 u32 tmp;
472 mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
473 mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
474 do {
475 tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
476 } while (tmp & 1 << (slot_idx % 32));
477 do {
478 tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
479 } while (tmp & 1 << (slot_idx % 32));
480}
481
482static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
483 u32 tfs)
484{
485 void __iomem *regs = mvi->regs;
486 u32 tmp;
487
488 if (type == PORT_TYPE_SATA) {
489 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
490 mw32(MVS_INT_STAT_SRS_0, tmp);
491 }
492 mw32(MVS_INT_STAT, CINT_CI_STOP);
493 tmp = mr32(MVS_PCS) | 0xFF00;
494 mw32(MVS_PCS, tmp);
495}
496
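/* SATA register sets are tracked as enable bits split across two registers:
 * sets 0-15 live in MVS_PCS and sets 16 and above in MVS_CTL; freeing a set
 * clears its enable bit and acknowledges any pending SRS interrupt status.
 */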
497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
498{
499 void __iomem *regs = mvi->regs;
500 u32 tmp, offs;
501
502 if (*tfs == MVS_ID_NOT_MAPPED)
503 return;
504
505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
506 if (*tfs < 16) {
507 tmp = mr32(MVS_PCS);
508 mw32(MVS_PCS, tmp & ~offs);
509 } else {
510 tmp = mr32(MVS_CTL);
511 mw32(MVS_CTL, tmp & ~offs);
512 }
513
514 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
515 if (tmp)
516 mw32(MVS_INT_STAT_SRS_0, tmp);
517
518 *tfs = MVS_ID_NOT_MAPPED;
519 return;
520}
521
522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
523{
524 int i;
525 u32 tmp, offs;
526 void __iomem *regs = mvi->regs;
527
528 if (*tfs != MVS_ID_NOT_MAPPED)
529 return 0;
530
531 tmp = mr32(MVS_PCS);
532
533 for (i = 0; i < mvi->chip->srs_sz; i++) {
534 if (i == 16)
535 tmp = mr32(MVS_CTL);
536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
537 if (!(tmp & offs)) {
538 *tfs = i;
539
540 if (i < 16)
541 mw32(MVS_PCS, tmp | offs);
542 else
543 mw32(MVS_CTL, tmp | offs);
544 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
545 if (tmp)
546 mw32(MVS_INT_STAT_SRS_0, tmp);
547 return 0;
548 }
549 }
550 return MVS_ID_NOT_MAPPED;
551}
552
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555 int i;
556 struct scatterlist *sg;
557 struct mvs_prd *buf_prd = prd;
558 for_each_sg(scatter, sg, nr, i) {
559 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561 buf_prd++;
562 }
563}
564
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567 u32 phy_st;
568 mvs_write_port_cfg_addr(mvi, i,
569 PHYR_PHY_STAT);
570 phy_st = mvs_read_port_cfg_data(mvi, i);
571 if (phy_st & PHY_OOB_DTCTD)
572 return 1;
573 return 0;
574}
575
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577 struct sas_identify_frame *id)
578
579{
580 struct mvs_phy *phy = &mvi->phy[i];
581 struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583 sas_phy->linkrate =
584 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587 phy->minimum_linkrate =
588 (phy->phy_status &
589 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590 phy->maximum_linkrate =
591 (phy->phy_status &
592 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601 phy->att_dev_sas_addr =
602 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
603 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605 phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610 u32 tmp;
611 struct mvs_phy *phy = &mvi->phy[i];
612 /* workaround for HW phy decoding error on 1.5g disk drive */
613 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614 tmp = mvs_read_port_vsr_data(mvi, i);
615 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617 SAS_LINK_RATE_1_5_GBPS)
618 tmp &= ~PHY_MODE6_LATECLK;
619 else
620 tmp |= PHY_MODE6_LATECLK;
621 mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625 struct sas_phy_linkrates *rates)
626{
627 u32 lrmin = 0, lrmax = 0;
628 u32 tmp;
629
630 tmp = mvs_read_phy_ctl(mvi, phy_id);
631 lrmin = (rates->minimum_linkrate << 8);
632 lrmax = (rates->maximum_linkrate << 12);
633
634 if (lrmin) {
635 tmp &= ~(0xf << 8);
636 tmp |= lrmin;
637 }
638 if (lrmax) {
639 tmp &= ~(0xf << 12);
640 tmp |= lrmax;
641 }
642 mvs_write_phy_ctl(mvi, phy_id, tmp);
643 mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648 u32 tmp;
649 void __iomem *regs = mvi->regs;
650 tmp = mr32(MVS_PCS);
651 mw32(MVS_PCS, tmp & 0xFFFF);
652 mw32(MVS_PCS, tmp);
653 tmp = mr32(MVS_CTL);
654 mw32(MVS_CTL, tmp & 0xFFFF);
655 mw32(MVS_CTL, tmp);
656}
657
658
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661 void __iomem *regs = mvi->regs_ex;
662 return ior32(SPI_DATA_REG_64XX);
663}
664
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667 void __iomem *regs = mvi->regs_ex;
668 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
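/* Build the SPI command word as consumed by the 64xx flash interface: the
 * opcode goes in bits 31:24, the transfer length is shifted to bit 19,
 * bit 23 is set for a read, bit 22 marks that an address is supplied, and
 * the low 18 bits carry the flash address.
 */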
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673 u32 *dwCmd,
674 u8 cmd,
675 u8 read,
676 u8 length,
677 u32 addr
678 )
679{
680 u32 dwTmp;
681
682 dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683 if (read)
684 dwTmp |= 1U<<23;
685
686 if (addr != MV_MAX_U32) {
687 dwTmp |= 1U<<22;
688 dwTmp |= (addr & 0x0003FFFF);
689 }
690
691 *dwCmd = dwTmp;
692 return 0;
693}
694
695
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698 void __iomem *regs = mvi->regs_ex;
699 int retry;
700
701 for (retry = 0; retry < 1; retry++) {
702 iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703 iow32(SPI_CMD_REG_64XX, cmd);
704 iow32(SPI_CTRL_REG_64XX,
705 SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706 }
707
708 return 0;
709}
710
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713 void __iomem *regs = mvi->regs_ex;
714 u32 i, dwTmp;
715
716 for (i = 0; i < timeout; i++) {
717 dwTmp = ior32(SPI_CTRL_REG_64XX);
718 if (!(dwTmp & SPI_CTRL_SPISTART))
719 return 0;
720 msleep(10);
721 }
722
723 return -1;
724}
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729 int i;
730 struct mvs_prd *buf_prd = prd;
731 buf_prd += from;
732 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733 buf_prd->addr = cpu_to_le64(buf_dma);
734 buf_prd->len = cpu_to_le32(buf_len);
735 ++buf_prd;
736 }
737}
738#endif
739
740const struct mvs_dispatch mvs_64xx_dispatch = {
741 "mv64xx",
742 mvs_64xx_init,
743 NULL,
744 mvs_64xx_ioremap,
745 mvs_64xx_iounmap,
746 mvs_64xx_isr,
747 mvs_64xx_isr_status,
748 mvs_64xx_interrupt_enable,
749 mvs_64xx_interrupt_disable,
750 mvs_read_phy_ctl,
751 mvs_write_phy_ctl,
752 mvs_read_port_cfg_data,
753 mvs_write_port_cfg_data,
754 mvs_write_port_cfg_addr,
755 mvs_read_port_vsr_data,
756 mvs_write_port_vsr_data,
757 mvs_write_port_vsr_addr,
758 mvs_read_port_irq_stat,
759 mvs_write_port_irq_stat,
760 mvs_read_port_irq_mask,
761 mvs_write_port_irq_mask,
762 mvs_get_sas_addr,
763 mvs_64xx_command_active,
764 mvs_64xx_issue_stop,
765 mvs_start_delivery,
766 mvs_rx_update,
767 mvs_int_full,
768 mvs_64xx_assign_reg_set,
769 mvs_64xx_free_reg_set,
770 mvs_get_prd_size,
771 mvs_get_prd_count,
772 mvs_64xx_make_prd,
773 mvs_64xx_detect_porttype,
774 mvs_64xx_oob_done,
775 mvs_64xx_fix_phy_info,
776 mvs_64xx_phy_work_around,
777 mvs_64xx_phy_set_link_rate,
778 mvs_hw_max_link_rate,
779 mvs_64xx_phy_disable,
780 mvs_64xx_phy_enable,
781 mvs_64xx_phy_reset,
782 mvs_64xx_stp_reset,
783 mvs_64xx_clear_active_cmds,
784 mvs_64xx_spi_read_data,
785 mvs_64xx_spi_write_data,
786 mvs_64xx_spi_buildcmd,
787 mvs_64xx_spi_issuecmd,
788 mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790 mvs_64xx_fix_dma,
791#endif
792};
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 00000000000..42e947d9795
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
1/*
2 * Marvell 88SE64xx hardware specific head file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS64XX_REG_H_
26#define _MVS64XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
31
32/* enhanced mode registers (BAR4) */
33enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x08, /* global irq status */
36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
37
38 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
39 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
40
41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
42
43 MVS_CTL = 0x100, /* SAS/SATA port configuration */
44 MVS_PCS = 0x104, /* SAS/SATA port control/status */
45 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
46 MVS_CMD_LIST_HI = 0x10C,
47 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
48 MVS_RX_FIS_HI = 0x114,
49
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67
68 /* ports 1-3 follow after this */
69 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
70 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
71 /* ports 5-7 follow after this */
72 MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
73 MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
77 /* ports 5-7 follow after this */
78 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
79
80 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
81 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
82
83 /* ports 1-3 follow after this */
84 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
85 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
86 /* ports 5-7 follow after this */
87 MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
88 MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
89
90 /* ports 1-3 follow after this */
91 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
92 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
93 /* ports 5-7 follow after this */
94 MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
95 MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
96};
97
98enum pci_cfg_registers {
99 PCR_PHY_CTL = 0x40,
100 PCR_PHY_CTL2 = 0x90,
101 PCR_DEV_CTRL = 0xE8,
102 PCR_LINK_STAT = 0xF2,
103};
104
105/* SAS/SATA Vendor Specific Port Registers */
106enum sas_sata_vsp_regs {
107 VSR_PHY_STAT = 0x00, /* Phy Status */
108 VSR_PHY_MODE1 = 0x01, /* phy tx */
109 VSR_PHY_MODE2 = 0x02, /* tx scc */
110 VSR_PHY_MODE3 = 0x03, /* pll */
111 VSR_PHY_MODE4 = 0x04, /* VCO */
112 VSR_PHY_MODE5 = 0x05, /* Rx */
113 VSR_PHY_MODE6 = 0x06, /* CDR */
114 VSR_PHY_MODE7 = 0x07, /* Impedance */
115 VSR_PHY_MODE8 = 0x08, /* Voltage */
116 VSR_PHY_MODE9 = 0x09, /* Test */
117 VSR_PHY_MODE10 = 0x0A, /* Power */
118 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
119	VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
120	VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
121};
122
123enum chip_register_bits {
124 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY 64
132
133struct mvs_prd {
134 __le64 addr; /* 64-bit buffer address */
135 __le32 reserved;
136 __le32 len; /* 16-bit length */
137};
138
139#define SPI_CTRL_REG 0xc0
140#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
141#define SPI_CTRL_SPIRDY (1U<<22)
142#define SPI_CTRL_SPISTART (1U<<20)
143
144#define SPI_CMD_REG 0xc4
145#define SPI_DATA_REG 0xc8
146
147#define SPI_CTRL_REG_64XX 0x10
148#define SPI_CMD_REG_64XX 0x14
149#define SPI_DATA_REG_64XX 0x18
150
151#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 00000000000..0940fae19d2
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i];
33 u32 phy_status;
34
35 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36 reg = mvs_read_port_vsr_data(mvi, i);
37 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
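	/* PHY_MODE3 bits [21:16] identify the attached device type:
	 * 0x10 means SAS; anything else (0x1d is seen for SATA) is SATA
	 */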
39 switch (phy_status) {
40 case 0x10:
41 phy->phy_type |= PORT_TYPE_SAS;
42 break;
43 case 0x1d:
44 default:
45 phy->phy_type |= PORT_TYPE_SATA;
46 break;
47 }
48}
49
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52 void __iomem *regs = mvi->regs;
53 u32 tmp;
54
55 tmp = mr32(MVS_PCS);
56 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57 mw32(MVS_PCS, tmp);
58}
59
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62 u32 tmp;
63
64 tmp = mvs_read_port_irq_stat(mvi, phy_id);
65 tmp &= ~PHYEV_RDY_CH;
66 mvs_write_port_irq_stat(mvi, phy_id, tmp);
67 if (hard) {
68 tmp = mvs_read_phy_ctl(mvi, phy_id);
69 tmp |= PHY_RST_HARD;
70 mvs_write_phy_ctl(mvi, phy_id, tmp);
71 do {
72 tmp = mvs_read_phy_ctl(mvi, phy_id);
73 } while (tmp & PHY_RST_HARD);
74 } else {
75 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76 tmp = mvs_read_port_vsr_data(mvi, phy_id);
77 tmp |= PHY_RST;
78 mvs_write_port_vsr_data(mvi, phy_id, tmp);
79 }
80}
81
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84 u32 tmp;
85 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86 tmp = mvs_read_port_vsr_data(mvi, phy_id);
87 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102 void __iomem *regs = mvi->regs;
103 int i;
104 u32 tmp, cctl;
105
106 mvs_show_pcie_usage(mvi);
107 if (mvi->flags & MVF_FLAG_SOC) {
108 tmp = mr32(MVS_PHY_CTL);
109 tmp &= ~PCTL_PWR_OFF;
110 tmp |= PCTL_PHY_DSBL;
111 mw32(MVS_PHY_CTL, tmp);
112 }
113
114 /* Init Chip */
115 /* make sure RST is set; HBA_RST /should/ have done that for us */
116 cctl = mr32(MVS_CTL) & 0xFFFF;
117 if (cctl & CCTL_RST)
118 cctl &= ~CCTL_RST;
119 else
120 mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122 if (mvi->flags & MVF_FLAG_SOC) {
123 tmp = mr32(MVS_PHY_CTL);
124 tmp &= ~PCTL_PWR_OFF;
125 tmp |= PCTL_COM_ON;
126 tmp &= ~PCTL_PHY_DSBL;
127 tmp |= PCTL_LINK_RST;
128 mw32(MVS_PHY_CTL, tmp);
129 msleep(100);
130 tmp &= ~PCTL_LINK_RST;
131 mw32(MVS_PHY_CTL, tmp);
132 msleep(100);
133 }
134
135 /* reset control */
136 mw32(MVS_PCS, 0); /* MVS_PCS */
137 mw32(MVS_STP_REG_SET_0, 0);
138 mw32(MVS_STP_REG_SET_1, 0);
139
140 /* init phys */
141 mvs_phy_hacks(mvi);
142
143 /* disable Multiplexing, enable phy implemented */
144 mw32(MVS_PORTS_IMP, 0xFF);
145
146
147 mw32(MVS_PA_VSR_ADDR, 0x00000104);
148 mw32(MVS_PA_VSR_PORT, 0x00018080);
149 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152	/* set LED to blink during I/O */
153 mw32(MVS_PA_VSR_ADDR, 0x00000030);
154 tmp = mr32(MVS_PA_VSR_PORT);
155 tmp &= 0xFFFF00FF;
156 tmp |= 0x00003300;
157 mw32(MVS_PA_VSR_PORT, tmp);
158
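	/* program the command list, RX FIS and ring base addresses; the
	 * "(x >> 16) >> 16" below extracts the upper 32 bits of a dma_addr_t
	 * without tripping a shift-width warning when dma_addr_t is 32 bits
	 */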
159 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166 mw32(MVS_TX_LO, mvi->tx_dma);
167 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170 mw32(MVS_RX_LO, mvi->rx_dma);
171 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173 for (i = 0; i < mvi->chip->n_phy; i++) {
174 mvs_94xx_phy_disable(mvi, i);
175 /* set phy local SAS address */
176 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177 (mvi->phy[i].dev_sas_addr));
178
179 mvs_94xx_enable_xmt(mvi, i);
180 mvs_94xx_phy_enable(mvi, i);
181
182 mvs_94xx_phy_reset(mvi, i, 1);
183 msleep(500);
184 mvs_94xx_detect_porttype(mvi, i);
185 }
186
187 if (mvi->flags & MVF_FLAG_SOC) {
188 /* set select registers */
189 writel(0x0E008000, regs + 0x000);
190 writel(0x59000008, regs + 0x004);
191 writel(0x20, regs + 0x008);
192 writel(0x20, regs + 0x00c);
193 writel(0x20, regs + 0x010);
194 writel(0x20, regs + 0x014);
195 writel(0x20, regs + 0x018);
196 writel(0x20, regs + 0x01c);
197 }
198 for (i = 0; i < mvi->chip->n_phy; i++) {
199 /* clear phy int status */
200 tmp = mvs_read_port_irq_stat(mvi, i);
201 tmp &= ~PHYEV_SIG_FIS;
202 mvs_write_port_irq_stat(mvi, i, tmp);
203
204 /* set phy int mask */
205 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207 mvs_write_port_irq_mask(mvi, i, tmp);
208
209 msleep(100);
210 mvs_update_phyinfo(mvi, i, 1);
211 }
212
213 /* FIXME: update wide port bitmaps */
214
215	/* use little endian for the open address frame, command table, etc. */
216	/*
217	 * it seems (from the spec) that turning on big-endian mode won't do us
218	 * any good even on big-endian machines; this needs further confirmation
219	 */
220 cctl = mr32(MVS_CTL);
221 cctl |= CCTL_ENDIAN_CMD;
222 cctl |= CCTL_ENDIAN_DATA;
223 cctl &= ~CCTL_ENDIAN_OPEN;
224 cctl |= CCTL_ENDIAN_RSP;
225 mw32_f(MVS_CTL, cctl);
226
227 /* reset CMD queue */
228 tmp = mr32(MVS_PCS);
229 tmp |= PCS_CMD_RST;
230 mw32(MVS_PCS, tmp);
231	/* interrupt coalescing may cause missed HW interrupts in some cases;
232	 * the maximum coalescing count is 0x1ff while our maximum slot count is
233	 * 0x200, which would wrap the count to 0, so leave coalescing disabled.
234	 */
235 tmp = 0;
236 mw32(MVS_INT_COAL, tmp);
237
238 tmp = 0x100;
239 mw32(MVS_INT_COAL_TMOUT, tmp);
240
241 /* ladies and gentlemen, start your engines */
242 mw32(MVS_TX_CFG, 0);
243 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245 /* enable CMD/CMPL_Q/RESP mode */
246 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247 PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249 /* enable completion queue interrupt */
250 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251 CINT_DMA_PCIE);
252 tmp |= CINT_PHY_MASK;
253 mw32(MVS_INT_MASK, tmp);
254
255 /* Enable SRS interrupt */
256 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258 return 0;
259}
260
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
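	/* layout of the BAR mapped here: the global/PCIe block is taken from
	 * +0x10200, the SAS core registers from +0x20000, and the second core
	 * (mvi->id == 1) sits a further +0x4000 beyond that
	 */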
263 if (!mvs_ioremap(mvi, 2, -1)) {
264 mvi->regs_ex = mvi->regs + 0x10200;
265 mvi->regs += 0x20000;
266 if (mvi->id == 1)
267 mvi->regs += 0x4000;
268 return 0;
269 }
270 return -1;
271}
272
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275 if (mvi->regs) {
276 mvi->regs -= 0x20000;
277 if (mvi->id == 1)
278 mvi->regs -= 0x4000;
279 mvs_iounmap(mvi->regs);
280 }
281}
282
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285 void __iomem *regs = mvi->regs_ex;
286 u32 tmp;
287
288 tmp = mr32(MVS_GBL_CTL);
289 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290 mw32(MVS_GBL_INT_STAT, tmp);
291 writel(tmp, regs + 0x0C);
292 writel(tmp, regs + 0x10);
293 writel(tmp, regs + 0x14);
294 writel(tmp, regs + 0x18);
295 mw32(MVS_GBL_CTL, tmp);
296}
297
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300 void __iomem *regs = mvi->regs_ex;
301 u32 tmp;
302
303 tmp = mr32(MVS_GBL_CTL);
304
305 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306 mw32(MVS_GBL_INT_STAT, tmp);
307 writel(tmp, regs + 0x0C);
308 writel(tmp, regs + 0x10);
309 writel(tmp, regs + 0x14);
310 writel(tmp, regs + 0x18);
311 mw32(MVS_GBL_CTL, tmp);
312}
313
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316 void __iomem *regs = mvi->regs_ex;
317 u32 stat = 0;
318 if (!(mvi->flags & MVF_FLAG_SOC)) {
319 stat = mr32(MVS_GBL_INT_STAT);
320
321 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322 return 0;
323 }
324 return stat;
325}
326
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329 void __iomem *regs = mvi->regs;
330
331 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
333 mw32_f(MVS_INT_STAT, CINT_DONE);
334 #ifndef MVS_USE_TASKLET
335 spin_lock(&mvi->lock);
336 #endif
337 mvs_int_full(mvi);
338 #ifndef MVS_USE_TASKLET
339 spin_unlock(&mvi->lock);
340 #endif
341 }
342 return IRQ_HANDLED;
343}
344
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347 u32 tmp;
348 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349 do {
350 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351 } while (tmp & 1 << (slot_idx % 32));
352}
353
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355 u32 tfs)
356{
357 void __iomem *regs = mvi->regs;
358 u32 tmp;
359
360 if (type == PORT_TYPE_SATA) {
361 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362 mw32(MVS_INT_STAT_SRS_0, tmp);
363 }
364 mw32(MVS_INT_STAT, CINT_CI_STOP);
365 tmp = mr32(MVS_PCS) | 0xFF00;
366 mw32(MVS_PCS, tmp);
367}
368
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371 void __iomem *regs = mvi->regs;
372 u32 tmp;
373 u8 reg_set = *tfs;
374
375 if (*tfs == MVS_ID_NOT_MAPPED)
376 return;
377
378 mvi->sata_reg_set &= ~bit(reg_set);
379 if (reg_set < 32) {
380 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382 if (tmp)
383 mw32(MVS_INT_STAT_SRS_0, tmp);
384 } else {
385 w_reg_set_enable(reg_set, mvi->sata_reg_set);
386 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
387 if (tmp)
388 mw32(MVS_INT_STAT_SRS_1, tmp);
389 }
390
391 *tfs = MVS_ID_NOT_MAPPED;
392
393 return;
394}
395
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398 int i;
399 void __iomem *regs = mvi->regs;
400
401 if (*tfs != MVS_ID_NOT_MAPPED)
402 return 0;
403
404 i = mv_ffc64(mvi->sata_reg_set);
405 if (i > 32) {
406 mvi->sata_reg_set |= bit(i);
407 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408 *tfs = i;
409 return 0;
410 } else if (i >= 0) {
411 mvi->sata_reg_set |= bit(i);
412 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413 *tfs = i;
414 return 0;
415 }
416 return MVS_ID_NOT_MAPPED;
417}
418
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421 int i;
422 struct scatterlist *sg;
423 struct mvs_prd *buf_prd = prd;
424 for_each_sg(scatter, sg, nr, i) {
425 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427 buf_prd++;
428 }
429}
430
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433 u32 phy_st;
434 phy_st = mvs_read_phy_ctl(mvi, i);
435 if (phy_st & PHY_READY_MASK) /* phy ready */
436 return 1;
437 return 0;
438}
439
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441 struct sas_identify_frame *id)
442{
443 int i;
444 u32 id_frame[7];
445
446 for (i = 0; i < 7; i++) {
447 mvs_write_port_cfg_addr(mvi, port_id,
448 CONFIG_ID_FRAME0 + i * 4);
449 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450 }
451 memcpy(id, id_frame, 28);
452}
453
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455 struct sas_identify_frame *id)
456{
457 int i;
458 u32 id_frame[7];
459
460 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
461 for (i = 0; i < 7; i++) {
462 mvs_write_port_cfg_addr(mvi, port_id,
463 CONFIG_ATT_ID_FRAME0 + i * 4);
464 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465 mv_dprintk("94xx phy %d atta frame %d %x.\n",
466 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467 }
468 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
469 memcpy(id, id_frame, 28);
470}
471
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474 u32 att_dev_info = 0;
475
476 att_dev_info |= id->dev_type;
477 if (id->stp_iport)
478 att_dev_info |= PORT_DEV_STP_INIT;
479 if (id->smp_iport)
480 att_dev_info |= PORT_DEV_SMP_INIT;
481 if (id->ssp_iport)
482 att_dev_info |= PORT_DEV_SSP_INIT;
483 if (id->stp_tport)
484 att_dev_info |= PORT_DEV_STP_TRGT;
485 if (id->smp_tport)
486 att_dev_info |= PORT_DEV_SMP_TRGT;
487 if (id->ssp_tport)
488 att_dev_info |= PORT_DEV_SSP_TRGT;
489
490 att_dev_info |= (u32)id->phy_id<<24;
491 return att_dev_info;
492}
493
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496 return mvs_94xx_make_dev_info(id);
497}
498
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500 struct sas_identify_frame *id)
501{
502 struct mvs_phy *phy = &mvi->phy[i];
503 struct asd_sas_phy *sas_phy = &phy->sas_phy;
504 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505 sas_phy->linkrate =
506 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
508 sas_phy->linkrate += 0x8;
509 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512 mvs_94xx_get_dev_identify_frame(mvi, i, id);
513 phy->dev_info = mvs_94xx_make_dev_info(id);
514
515 if (phy->phy_type & PORT_TYPE_SAS) {
516 mvs_94xx_get_att_identify_frame(mvi, i, id);
517 phy->att_dev_info = mvs_94xx_make_att_info(id);
518 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519 } else {
520 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521 }
522
523}
524
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526 struct sas_phy_linkrates *rates)
527{
528 /* TODO */
529}
530
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533 u32 tmp;
534 void __iomem *regs = mvi->regs;
535 tmp = mr32(MVS_STP_REG_SET_0);
536 mw32(MVS_STP_REG_SET_0, 0);
537 mw32(MVS_STP_REG_SET_0, tmp);
538 tmp = mr32(MVS_STP_REG_SET_1);
539 mw32(MVS_STP_REG_SET_1, 0);
540 mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
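/* the SPI registers (0xc800 and up) live below the +0x10200 offset that
 * regs_ex points at (see mvs_94xx_ioremap), so the helpers below step
 * back to the start of the mapping first
 */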
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546 void __iomem *regs = mvi->regs_ex - 0x10200;
547 return mr32(SPI_RD_DATA_REG_94XX);
548}
549
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552 void __iomem *regs = mvi->regs_ex - 0x10200;
553 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558 u32 *dwCmd,
559 u8 cmd,
560 u8 read,
561 u8 length,
562 u32 addr
563 )
564{
565 void __iomem *regs = mvi->regs_ex - 0x10200;
566 u32 dwTmp;
567
568 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569 if (read)
570 dwTmp |= SPI_CTRL_READ_94XX;
571
572 if (addr != MV_MAX_U32) {
573 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574 dwTmp |= SPI_ADDR_VLD_94XX;
575 }
576
577 *dwCmd = dwTmp;
578 return 0;
579}
580
581
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584 void __iomem *regs = mvi->regs_ex - 0x10200;
585 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587 return 0;
588}
589
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592 void __iomem *regs = mvi->regs_ex - 0x10200;
593 u32 i, dwTmp;
594
595 for (i = 0; i < timeout; i++) {
596 dwTmp = mr32(SPI_CTRL_REG_94XX);
597 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598 return 0;
599 msleep(10);
600 }
601
602 return -1;
603}
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608 int i;
609 struct mvs_prd *buf_prd = prd;
610 buf_prd += from;
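	/* point every remaining PRD entry at the same dummy buffer so the DMA
	 * engine never references unmapped memory; this is the hotplug DMA
	 * fix that DISABLE_HOTPLUG_DMA_FIX guards
	 */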
611 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612 buf_prd->addr = cpu_to_le64(buf_dma);
613 buf_prd->im_len.len = cpu_to_le32(buf_len);
614 ++buf_prd;
615 }
616}
617#endif
618
619const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx",
621 mvs_94xx_init,
622 NULL,
623 mvs_94xx_ioremap,
624 mvs_94xx_iounmap,
625 mvs_94xx_isr,
626 mvs_94xx_isr_status,
627 mvs_94xx_interrupt_enable,
628 mvs_94xx_interrupt_disable,
629 mvs_read_phy_ctl,
630 mvs_write_phy_ctl,
631 mvs_read_port_cfg_data,
632 mvs_write_port_cfg_data,
633 mvs_write_port_cfg_addr,
634 mvs_read_port_vsr_data,
635 mvs_write_port_vsr_data,
636 mvs_write_port_vsr_addr,
637 mvs_read_port_irq_stat,
638 mvs_write_port_irq_stat,
639 mvs_read_port_irq_mask,
640 mvs_write_port_irq_mask,
641 mvs_get_sas_addr,
642 mvs_94xx_command_active,
643 mvs_94xx_issue_stop,
644 mvs_start_delivery,
645 mvs_rx_update,
646 mvs_int_full,
647 mvs_94xx_assign_reg_set,
648 mvs_94xx_free_reg_set,
649 mvs_get_prd_size,
650 mvs_get_prd_count,
651 mvs_94xx_make_prd,
652 mvs_94xx_detect_porttype,
653 mvs_94xx_oob_done,
654 mvs_94xx_fix_phy_info,
655 NULL,
656 mvs_94xx_phy_set_link_rate,
657 mvs_hw_max_link_rate,
658 mvs_94xx_phy_disable,
659 mvs_94xx_phy_enable,
660 mvs_94xx_phy_reset,
661 NULL,
662 mvs_94xx_clear_active_cmds,
663 mvs_94xx_spi_read_data,
664 mvs_94xx_spi_write_data,
665 mvs_94xx_spi_buildcmd,
666 mvs_94xx_spi_issuecmd,
667 mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669 mvs_94xx_fix_dma,
670#endif
671};
672
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 00000000000..23ed9b16466
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
2 * Marvell 88SE94xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
32enum hw_registers {
33 MVS_GBL_CTL = 0x04, /* global control */
34 MVS_GBL_INT_STAT = 0x00, /* global irq status */
35 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
36
37 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
38 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
39
40 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
41
42 MVS_CTL = 0x100, /* SAS/SATA port configuration */
43 MVS_PCS = 0x104, /* SAS/SATA port control/status */
44 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
45 MVS_CMD_LIST_HI = 0x10C,
46 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
47 MVS_RX_FIS_HI = 0x114,
48 MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
49 MVS_STP_REG_SET_1 = 0x11C,
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67 MVS_INT_STAT_SRS_1 = 0x160,
68 MVS_INT_MASK_SRS_1 = 0x164,
69 MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
70 MVS_NON_NCQ_ERR_1 = 0x16C,
71 MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
72 MVS_CMD_DATA = 0x174, /* Command register port (data) */
73 MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
77 MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
78 /* ports 5-7 follow after this */
79 MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
80 MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
81
82 /* ports 1-3 follow after this */
83 MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
84 /* ports 5-7 follow after this */
85 MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
86
87 /* ports 1-3 follow after this */
88 MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
89 MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
90 /* ports 5-7 follow after this */
91 MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
92 MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
93
94 /* phys 1-3 follow after this */
95 MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
96 MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
97	/* phys 5-7 follow after this */
98 /* multiplexing */
99 MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
100 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
101 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
102 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
103};
104
105enum pci_cfg_registers {
106 PCR_PHY_CTL = 0x40,
107 PCR_PHY_CTL2 = 0x90,
108 PCR_DEV_CTRL = 0x78,
109 PCR_LINK_STAT = 0x82,
110};
111
112/* SAS/SATA Vendor Specific Port Registers */
113enum sas_sata_vsp_regs {
114 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
115 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
116 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
117 VSR_PHY_MODE3 = 0x03 * 4, /* pll */
118 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
119 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
120 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
121 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
122 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
123 VSR_PHY_MODE9 = 0x09 * 4, /* Test */
124 VSR_PHY_MODE10 = 0x0A * 4, /* Power */
125 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
126	VSR_PHY_VS0		= 0x0C * 4, /* Vendor Specific 0 */
127	VSR_PHY_VS1		= 0x0D * 4, /* Vendor Specific 1 */
128};
129
130enum chip_register_bits {
131 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
132 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
135 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
136};
137
138enum pci_interrupt_cause {
139 /* MAIN_IRQ_CAUSE (R10200) Bits*/
140 IRQ_COM_IN_I2O_IOP0 = (1 << 0),
141 IRQ_COM_IN_I2O_IOP1 = (1 << 1),
142 IRQ_COM_IN_I2O_IOP2 = (1 << 2),
143 IRQ_COM_IN_I2O_IOP3 = (1 << 3),
144 IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
145 IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
146 IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
147 IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
148 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
149 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
150 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
151 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
152 IRQ_PCIF_DRBL0 = (1 << 12),
153 IRQ_PCIF_DRBL1 = (1 << 13),
154 IRQ_PCIF_DRBL2 = (1 << 14),
155 IRQ_PCIF_DRBL3 = (1 << 15),
156 IRQ_XOR_A = (1 << 16),
157 IRQ_XOR_B = (1 << 17),
158 IRQ_SAS_A = (1 << 18),
159 IRQ_SAS_B = (1 << 19),
160 IRQ_CPU_CNTRL = (1 << 20),
161 IRQ_GPIO = (1 << 21),
162 IRQ_UART = (1 << 22),
163 IRQ_SPI = (1 << 23),
164 IRQ_I2C = (1 << 24),
165 IRQ_SGPIO = (1 << 25),
166 IRQ_COM_ERR = (1 << 29),
167 IRQ_I2O_ERR = (1 << 30),
168 IRQ_PCIE_ERR = (1 << 31),
169};
170
171#define MAX_SG_ENTRY 255
172
173struct mvs_prd_imt {
174 __le32 len:22;
175 u8 _r_a:2;
176 u8 misc_ctl:4;
177 u8 inter_sel:4;
178};
179
180struct mvs_prd {
181 /* 64-bit buffer address */
182 __le64 addr;
183 /* 22-bit length */
184 struct mvs_prd_imt im_len;
185} __attribute__ ((packed));
186
187#define SPI_CTRL_REG_94XX 0xc800
188#define SPI_ADDR_REG_94XX 0xc804
189#define SPI_WR_DATA_REG_94XX 0xc808
190#define SPI_RD_DATA_REG_94XX 0xc80c
191#define SPI_CTRL_READ_94XX (1U << 2)
192#define SPI_ADDR_VLD_94XX (1U << 1)
193#define SPI_CTRL_SpiStart_94XX (1U << 0)
194
195#define mv_ffc(x) ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
212#define r_reg_set_enable(i) \
213 (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
214 mr32(MVS_STP_REG_SET_0))
215
216#define w_reg_set_enable(i, tmp) \
217 (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
218 mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 00000000000..a67e1c4172f
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#ifndef _MV_CHIPS_H_
27#define _MV_CHIPS_H_
28
29#define mr32(reg) readl(regs + reg)
30#define mw32(reg, val) writel((val), regs + reg)
31#define mw32_f(reg, val) do { \
32 mw32(reg, val); \
33 mr32(reg); \
34 } while (0)
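/* note: these register helpers expect a 'void __iomem *regs' variable to
 * be in scope at the call site
 */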
35
36#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
37#define ior32(reg) inl((unsigned long)(regs + reg))
38#define iow16(reg, val) 	outw(val, (unsigned long)(regs + reg))
39#define ior16(reg) inw((unsigned long)(regs + reg))
40#define iow8(reg, val)  	outb(val, (unsigned long)(regs + reg))
41#define ior8(reg) inb((unsigned long)(regs + reg))
42
43static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
44{
45 void __iomem *regs = mvi->regs;
46 mw32(MVS_CMD_ADDR, addr);
47 return mr32(MVS_CMD_DATA);
48}
49
50static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
51{
52 void __iomem *regs = mvi->regs;
53 mw32(MVS_CMD_ADDR, addr);
54 mw32(MVS_CMD_DATA, val);
55}
56
57static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
58{
59 void __iomem *regs = mvi->regs;
60 return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
61 mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
62}
63
64static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
65{
66 void __iomem *regs = mvi->regs;
67 if (port < 4)
68 mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
69 else
70 mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
71}
72
73static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
74 u32 off2, u32 port)
75{
76 void __iomem *regs = mvi->regs + off;
77 void __iomem *regs2 = mvi->regs + off2;
78 return (port < 4) ? readl(regs + port * 8) :
79 readl(regs2 + (port - 4) * 8);
80}
81
82static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
83 u32 port, u32 val)
84{
85 void __iomem *regs = mvi->regs + off;
86 void __iomem *regs2 = mvi->regs + off2;
87 if (port < 4)
88 writel(val, regs + port * 8);
89 else
90 writel(val, regs2 + (port - 4) * 8);
91}
92
93static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
94{
95 return mvs_read_port(mvi, MVS_P0_CFG_DATA,
96 MVS_P4_CFG_DATA, port);
97}
98
99static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
100 u32 port, u32 val)
101{
102 mvs_write_port(mvi, MVS_P0_CFG_DATA,
103 MVS_P4_CFG_DATA, port, val);
104}
105
106static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
107 u32 port, u32 addr)
108{
109 mvs_write_port(mvi, MVS_P0_CFG_ADDR,
110 MVS_P4_CFG_ADDR, port, addr);
111 mdelay(10);
112}
113
114static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
115{
116 return mvs_read_port(mvi, MVS_P0_VSR_DATA,
117 MVS_P4_VSR_DATA, port);
118}
119
120static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
121 u32 port, u32 val)
122{
123 mvs_write_port(mvi, MVS_P0_VSR_DATA,
124 MVS_P4_VSR_DATA, port, val);
125}
126
127static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
128 u32 port, u32 addr)
129{
130 mvs_write_port(mvi, MVS_P0_VSR_ADDR,
131 MVS_P4_VSR_ADDR, port, addr);
132 mdelay(10);
133}
134
135static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
136{
137 return mvs_read_port(mvi, MVS_P0_INT_STAT,
138 MVS_P4_INT_STAT, port);
139}
140
141static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
142 u32 port, u32 val)
143{
144 mvs_write_port(mvi, MVS_P0_INT_STAT,
145 MVS_P4_INT_STAT, port, val);
146}
147
148static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
149{
150 return mvs_read_port(mvi, MVS_P0_INT_MASK,
151 MVS_P4_INT_MASK, port);
152
153}
154
155static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
156 u32 port, u32 val)
157{
158 mvs_write_port(mvi, MVS_P0_INT_MASK,
159 MVS_P4_INT_MASK, port, val);
160}
161
162static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
163{
164 u32 tmp;
165
166 /* workaround for SATA R-ERR, to ignore phy glitch */
167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
168 tmp &= ~(1 << 9);
169 tmp |= (1 << 10);
170 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
171
172 /* enable retry 127 times */
173 mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
174
175 /* extend open frame timeout to max */
176 tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
177 tmp &= ~0xffff;
178 tmp |= 0x3fff;
179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
180
181 /* workaround for WDTIMEOUT , set to 550 ms */
182 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
183
184 /* not to halt for different port op during wideport link change */
185	/* do not halt on another port's operation during a wide port link change */
186
187	/* workaround for Seagate disks not being found during the OOB
188	 * sequence: receive COMINIT before sending out COMWAKE */
189 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
190 tmp &= 0x0000ffff;
191 tmp |= 0x00fa0000;
192 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
193
194 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
195 tmp &= 0x1fffffff;
196 tmp |= (2U << 29); /* 8 ms retry */
197 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
198}
199
200static inline void mvs_int_sata(struct mvs_info *mvi)
201{
202 u32 tmp;
203 void __iomem *regs = mvi->regs;
204 tmp = mr32(MVS_INT_STAT_SRS_0);
205 if (tmp)
206 mw32(MVS_INT_STAT_SRS_0, tmp);
207 MVS_CHIP_DISP->clear_active_cmds(mvi);
208}
209
210static inline void mvs_int_full(struct mvs_info *mvi)
211{
212 void __iomem *regs = mvi->regs;
213 u32 tmp, stat;
214 int i;
215
216 stat = mr32(MVS_INT_STAT);
217 mvs_int_rx(mvi, false);
218
219 for (i = 0; i < mvi->chip->n_phy; i++) {
220 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
221 if (tmp)
222 mvs_int_port(mvi, i, tmp);
223 }
224
225 if (stat & CINT_SRS)
226 mvs_int_sata(mvi);
227
228 mw32(MVS_INT_STAT, stat);
229}
230
231static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
232{
233 void __iomem *regs = mvi->regs;
234 mw32(MVS_TX_PROD_IDX, tx);
235}
236
237static inline u32 mvs_rx_update(struct mvs_info *mvi)
238{
239 void __iomem *regs = mvi->regs;
240 return mr32(MVS_RX_CONS_IDX);
241}
242
243static inline u32 mvs_get_prd_size(void)
244{
245 return sizeof(struct mvs_prd);
246}
247
248static inline u32 mvs_get_prd_count(void)
249{
250 return MAX_SG_ENTRY;
251}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257		"Unknown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
274static inline u32 mvs_hw_max_link_rate(void)
275{
276 return MAX_LINK_RATE;
277}
278
279#endif /* _MV_CHIPS_H_ */
280
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 00000000000..f8cb9defb96
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
1/*
2 * Marvell 88SE64xx/88SE94xx constants header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_
27
28
29enum chip_flavors {
30 chip_6320,
31 chip_6440,
32 chip_6485,
33 chip_9480,
34 chip_9180,
35};
36
37/* driver compile-time configuration */
38enum driver_configuration {
39 MVS_SLOTS = 512, /* command slots */
40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
42					/* software requires a power-of-2
43					 * ring size */
44 MVS_SOC_SLOTS = 64,
45 MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
46 MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
47
48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
51 MVS_OAF_SZ = 64, /* Open address frame buffer size */
52 MVS_QUEUE_SIZE = 32, /* Support Queue depth */
53 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
54 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
55};
56
57/* unchangeable hardware details */
58enum hardware_details {
59 MVS_MAX_PHYS = 8, /* max. possible phys */
60 MVS_MAX_PORTS = 8, /* max. possible ports */
61 MVS_SOC_PHYS = 4, /* soc phys */
62	MVS_SOC_PORTS		= 4,	/* soc ports */
63 MVS_MAX_DEVICES = 1024, /* max supported device */
64};
65
66/* peripheral registers (BAR2) */
67enum peripheral_registers {
68 SPI_CTL = 0x10, /* EEPROM control */
69 SPI_CMD = 0x14, /* EEPROM command */
70 SPI_DATA = 0x18, /* EEPROM data */
71};
72
73enum peripheral_register_bits {
74 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
75 TWSI_RD = (1U << 4), /* EEPROM read access */
76
77 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
78};
79
80enum hw_register_bits {
81 /* MVS_GBL_CTL */
82 INT_EN = (1U << 1), /* Global int enable */
83 HBA_RST = (1U << 0), /* HBA reset */
84
85 /* MVS_GBL_INT_STAT */
86 INT_XOR = (1U << 4), /* XOR engine event */
87 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
88
89 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
90 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
91 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
92 MODE_AUTO_DET_PORT6 = (1U << 14),
93 MODE_AUTO_DET_PORT5 = (1U << 13),
94 MODE_AUTO_DET_PORT4 = (1U << 12),
95 MODE_AUTO_DET_PORT3 = (1U << 11),
96 MODE_AUTO_DET_PORT2 = (1U << 10),
97 MODE_AUTO_DET_PORT1 = (1U << 9),
98 MODE_AUTO_DET_PORT0 = (1U << 8),
99 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
100 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
101 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
102 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
103 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
104 MODE_SAS_PORT6_MASK = (1U << 6),
105 MODE_SAS_PORT5_MASK = (1U << 5),
106 MODE_SAS_PORT4_MASK = (1U << 4),
107 MODE_SAS_PORT3_MASK = (1U << 3),
108 MODE_SAS_PORT2_MASK = (1U << 2),
109 MODE_SAS_PORT1_MASK = (1U << 1),
110 MODE_SAS_PORT0_MASK = (1U << 0),
111 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
112 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
113 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
114 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
115
116 /* SAS_MODE value may be
117 * dictated (in hw) by values
118 * of SATA_TARGET & AUTO_DET
119 */
120
121 /* MVS_TX_CFG */
122 TX_EN = (1U << 16), /* Enable TX */
123 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
124
125 /* MVS_RX_CFG */
126 RX_EN = (1U << 16), /* Enable RX */
127 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
128
129 /* MVS_INT_COAL */
130 COAL_EN = (1U << 16), /* Enable int coalescing */
131
132 /* MVS_INT_STAT, MVS_INT_MASK */
133 CINT_I2C = (1U << 31), /* I2C event */
134 CINT_SW0 = (1U << 30), /* software event 0 */
135 CINT_SW1 = (1U << 29), /* software event 1 */
136 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
137 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
138 CINT_MEM = (1U << 26), /* int mem parity err */
139 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
140 CINT_SRS = (1U << 3), /* SRS event */
141 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
142 CINT_DONE = (1U << 0), /* cmd completion */
143
144 /* shl for ports 1-3 */
145 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
146 CINT_PORT = (1U << 8), /* port0 event */
147 CINT_PORT_MASK_OFFSET = 8,
148 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
149 CINT_PHY_MASK_OFFSET = 4,
150 CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
151
152 /* TX (delivery) ring bits */
153 TXQ_CMD_SHIFT = 29,
154 TXQ_CMD_SSP = 1, /* SSP protocol */
155 TXQ_CMD_SMP = 2, /* SMP protocol */
156 TXQ_CMD_STP = 3, /* STP/SATA protocol */
157 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
158 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
159 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
160 TXQ_MODE_TARGET = 0,
161 TXQ_MODE_INITIATOR = 1,
162 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
163 TXQ_PRI_NORMAL = 0,
164 TXQ_PRI_HIGH = 1,
165 TXQ_SRS_SHIFT = 20, /* SATA register set */
166 TXQ_SRS_MASK = 0x7f,
167 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
168 TXQ_PHY_MASK = 0xff,
169 TXQ_SLOT_MASK = 0xfff, /* slot number */
170
171 /* RX (completion) ring bits */
172 RXQ_GOOD = (1U << 23), /* Response good */
173 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
174 RXQ_CMD_RX = (1U << 20), /* target cmd received */
175 RXQ_ATTN = (1U << 19), /* attention */
176 RXQ_RSP = (1U << 18), /* response frame xfer'd */
177 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
178 RXQ_DONE = (1U << 16), /* cmd complete */
179 RXQ_SLOT_MASK = 0xfff, /* slot number */
180
181 /* mvs_cmd_hdr bits */
182 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
183 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
184
185 /* SSP initiator only */
186 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
187
188 /* SSP initiator or target */
189 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
190
191 /* SSP target only */
192 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
193 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
194 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
195 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
196
197 MCH_SSP_MODE_PASSTHRU = 1,
198 MCH_SSP_MODE_NORMAL = 0,
199 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
200 MCH_FBURST = (1U << 11), /* first burst (SSP) */
201 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
202 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
203 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
204 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
205 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
206 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
207 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
208 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
209
210 CCTL_RST = (1U << 5), /* port logic reset */
211
212 /* 0(LSB first), 1(MSB first) */
213 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
214 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
215 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
216 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
217
218 /* MVS_Px_SER_CTLSTAT (per-phy control) */
219 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
220 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
221 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
222 PHY_RST = (1U << 0), /* phy reset */
223 PHY_READY_MASK = (1U << 20),
224
225 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
226 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
227	PHYEV_DCDR_ERR		= (1U << 23),	/* STP Decoder Error */
228 PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
229 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
230 PHYEV_AN = (1U << 18), /* SATA async notification */
231 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
232 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
233 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
234 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
235 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
236 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
237 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
238 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
239 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
240 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
241 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
242 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
243 PHYEV_ID_DONE = (1U << 2), /* identify done */
244 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
245 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
246
247 /* MVS_PCS */
248 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
249 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
250 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
251 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
252 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
253 PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
254 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
255 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
256 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
257 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
258 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
259
260 /* Port n Attached Device Info */
261 PORT_DEV_SSP_TRGT = (1U << 19),
262 PORT_DEV_SMP_TRGT = (1U << 18),
263 PORT_DEV_STP_TRGT = (1U << 17),
264 PORT_DEV_SSP_INIT = (1U << 11),
265 PORT_DEV_SMP_INIT = (1U << 10),
266 PORT_DEV_STP_INIT = (1U << 9),
267 PORT_PHY_ID_MASK = (0xFFU << 24),
268 PORT_SSP_TRGT_MASK = (0x1U << 19),
269 PORT_SSP_INIT_MASK = (0x1U << 11),
270 PORT_DEV_TRGT_MASK = (0x7U << 17),
271 PORT_DEV_INIT_MASK = (0x7U << 9),
272 PORT_DEV_TYPE_MASK = (0x7U << 0),
273
274 /* Port n PHY Status */
275 PHY_RDY = (1U << 2),
276 PHY_DW_SYNC = (1U << 1),
277 PHY_OOB_DTCTD = (1U << 0),
278
279 /* VSR */
280 /* PHYMODE 6 (CDB) */
281 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
282 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
283 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
284 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
285 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
286 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
287 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
288 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
289 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
290 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
291 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
292 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
293 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
294 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
295};
296
297/* SAS/SATA configuration port registers, aka phy registers */
298enum sas_sata_config_port_regs {
299 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
300 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
301 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
302 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
303 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
304 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
305 PHYR_SATA_CTL = 0x18, /* SATA control */
306 PHYR_PHY_STAT = 0x1C, /* PHY status */
307 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
308 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
309 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
310 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
311 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
312 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
313 PHYR_WIDE_PORT = 0x38, /* wide port participating */
314 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
315 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
316 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
317 CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
318 CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
319 CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
320 CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
321 CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
322 CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
323 CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
324 CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
325 CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
326 CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
327 CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
328 CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
329 CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
330 CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
331};
332
333enum sas_cmd_port_registers {
334 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
335 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
336 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
337 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
338 CMD_OOB_SPACE = 0x110, /* OOB space control register */
339 CMD_OOB_BURST = 0x114, /* OOB burst control register */
340 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
341 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
342 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
343 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
344 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
345 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
346 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
347 CMD_ID_TEST = 0x134, /* ID test register */
348 CMD_PL_TIMER = 0x138, /* PL timer register */
349 CMD_WD_TIMER = 0x13c, /* WD timer register */
350 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
351 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
352 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
353 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
354 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
355 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
356 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
357 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
358 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
359	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
360 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
361 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
362 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
363 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
364 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
365 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
366 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
367 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
368 CMD_RESET_COUNT = 0x188, /* Reset Count */
369 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
370 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
371 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
372 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
373 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
374 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
375 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
376 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
377 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
378 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
379 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
380 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
381 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
382 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
383 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
384};
385
386enum mvs_info_flags {
387 MVF_MSI = (1U << 0), /* MSI is enabled */
388 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
389 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
390};
391
392enum mvs_event_flags {
393 PHY_PLUG_EVENT = (3U),
394 PHY_PLUG_IN = (1U << 0), /* phy plug in */
395 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
396};
397
398enum mvs_port_type {
399 PORT_TGT_MASK = (1U << 5),
400 PORT_INIT_PORT = (1U << 4),
401 PORT_TGT_PORT = (1U << 3),
402 PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
403 PORT_TYPE_SAS = (1U << 1),
404 PORT_TYPE_SATA = (1U << 0),
405};
406
407/* Command Table Format */
408enum ct_format {
409 /* SSP */
410 SSP_F_H = 0x00,
411 SSP_F_IU = 0x18,
412 SSP_F_MAX = 0x4D,
413 /* STP */
414 STP_CMD_FIS = 0x00,
415 STP_ATAPI_CMD = 0x40,
416 STP_F_MAX = 0x10,
417 /* SMP */
418 SMP_F_T = 0x00,
419 SMP_F_DEP = 0x01,
420 SMP_F_MAX = 0x101,
421};
422
423enum status_buffer {
424 SB_EIR_OFF = 0x00, /* Error Information Record */
425 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
426 SB_RFB_MAX = 0x400, /* RFB size*/
427};
428
429enum error_info_rec {
430 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
431 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
432 RSP_OVER = (1U << 29), /* rsp buffer overflow */
433 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
434 UNK_FIS = (1U << 27), /* unknown FIS */
435 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
436 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
437 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
438 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
439 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
440 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
441 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
442 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
443 INTERLOCK = (1U << 15), /* interlock error */
444 NAK = (1U << 14), /* NAK rx'd */
445 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
446 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
447 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
448 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
449 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
450 STP_RES_BSY = (1U << 8), /* STP resources busy */
451 BREAK = (1U << 7), /* break received */
452 BAD_DEST = (1U << 6), /* bad destination */
453 BAD_PROTO = (1U << 5), /* protocol not supported */
454 BAD_RATE = (1U << 4), /* cxn rate not supported */
455 WRONG_DEST = (1U << 3), /* wrong destination error */
456 CREDIT_TO = (1U << 2), /* credit timeout */
457 WDOG_TO = (1U << 1), /* watchdog timeout */
458 BUF_PAR = (1U << 0), /* buffer parity error */
459};
460
461enum error_info_rec_2 {
462 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
463 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
464 APP_CHK_ERR = (1U << 13), /* Application Check error */
465 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
466 USR_BLK_NM = (1U << 0), /* User Block Number */
467};
468
469enum pci_cfg_register_bits {
470 PCTL_PWR_OFF = (0xFU << 24),
471 PCTL_COM_ON = (0xFU << 20),
472 PCTL_LINK_RST = (0xFU << 16),
473 PCTL_LINK_OFFS = (16),
474 PCTL_PHY_DSBL = (0xFU << 12),
475 PCTL_PHY_DSBL_OFFS = (12),
476 PRD_REQ_SIZE = (0x4000),
477 PRD_REQ_MASK = (0x00007000),
478 PLS_NEG_LINK_WD = (0x3FU << 4),
479 PLS_NEG_LINK_WD_OFFS = 4,
480 PLS_LINK_SPD = (0x0FU << 0),
481 PLS_LINK_SPD_OFFS = 0,
482};
483
484enum open_frame_protocol {
485 PROTOCOL_SMP = 0x0,
486 PROTOCOL_SSP = 0x1,
487 PROTOCOL_STP = 0x2,
488};
489
490/* define for response frame datapres field */
491enum datapres_field {
492 NO_DATA = 0,
493 RESPONSE_DATA = 1,
494 SENSE_DATA = 2,
495};
496
497/* define task management IU */
498struct mvs_tmf_task{
499 u8 tmf;
500 u16 tag_of_task_to_be_managed;
501};
502#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 00000000000..8646a19f999
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
1/*
2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#include "mv_sas.h"
27
28static struct scsi_transport_template *mvs_stt;
29static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35};
36
37#define SOC_SAS_NUM 2
38
39static struct scsi_host_template mvs_sht = {
40 .module = THIS_MODULE,
41 .name = DRV_NAME,
42 .queuecommand = sas_queuecommand,
43 .target_alloc = sas_target_alloc,
44 .slave_configure = mvs_slave_configure,
45 .slave_destroy = sas_slave_destroy,
46 .scan_finished = mvs_scan_finished,
47 .scan_start = mvs_scan_start,
48 .change_queue_depth = sas_change_queue_depth,
49 .change_queue_type = sas_change_queue_type,
50 .bios_param = sas_bios_param,
51 .can_queue = 1,
52 .cmd_per_lun = 1,
53 .this_id = -1,
54 .sg_tablesize = SG_ALL,
55 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
56 .use_clustering = ENABLE_CLUSTERING,
57 .eh_device_reset_handler = sas_eh_device_reset_handler,
58 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
59 .slave_alloc = mvs_slave_alloc,
60 .target_destroy = sas_target_destroy,
61 .ioctl = sas_ioctl,
62};
63
64static struct sas_domain_function_template mvs_transport_ops = {
65 .lldd_dev_found = mvs_dev_found,
66 .lldd_dev_gone = mvs_dev_gone,
67
68 .lldd_execute_task = mvs_queue_command,
69 .lldd_control_phy = mvs_phy_control,
70
71 .lldd_abort_task = mvs_abort_task,
72 .lldd_abort_task_set = mvs_abort_task_set,
73 .lldd_clear_aca = mvs_clear_aca,
74 .lldd_clear_task_set = mvs_clear_task_set,
75 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
76 .lldd_lu_reset = mvs_lu_reset,
77 .lldd_query_task = mvs_query_task,
78
79 .lldd_port_formed = mvs_port_formed,
80 .lldd_port_deformed = mvs_port_deformed,
81
82};
83
84static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
85{
86 struct mvs_phy *phy = &mvi->phy[phy_id];
87 struct asd_sas_phy *sas_phy = &phy->sas_phy;
88
89 phy->mvi = mvi;
90 init_timer(&phy->timer);
91 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
92 sas_phy->class = SAS;
93 sas_phy->iproto = SAS_PROTOCOL_ALL;
94 sas_phy->tproto = 0;
95 sas_phy->type = PHY_TYPE_PHYSICAL;
96 sas_phy->role = PHY_ROLE_INITIATOR;
97 sas_phy->oob_mode = OOB_NOT_CONNECTED;
98 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
99
100 sas_phy->id = phy_id;
101 sas_phy->sas_addr = &mvi->sas_addr[0];
102 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
103 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
104 sas_phy->lldd_phy = phy;
105}
106
107static void mvs_free(struct mvs_info *mvi)
108{
109 int i;
110 struct mvs_wq *mwq;
111 int slot_nr;
112
113 if (!mvi)
114 return;
115
116 if (mvi->flags & MVF_FLAG_SOC)
117 slot_nr = MVS_SOC_SLOTS;
118 else
119 slot_nr = MVS_SLOTS;
120
121 for (i = 0; i < mvi->tags_num; i++) {
122 struct mvs_slot_info *slot = &mvi->slot_info[i];
123 if (slot->buf)
124 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
125 slot->buf, slot->buf_dma);
126 }
127
128 if (mvi->tx)
129 dma_free_coherent(mvi->dev,
130 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
131 mvi->tx, mvi->tx_dma);
132 if (mvi->rx_fis)
133 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
134 mvi->rx_fis, mvi->rx_fis_dma);
135 if (mvi->rx)
136 dma_free_coherent(mvi->dev,
137 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
138 mvi->rx, mvi->rx_dma);
139 if (mvi->slot)
140 dma_free_coherent(mvi->dev,
141 sizeof(*mvi->slot) * slot_nr,
142 mvi->slot, mvi->slot_dma);
143#ifndef DISABLE_HOTPLUG_DMA_FIX
144 if (mvi->bulk_buffer)
145 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
146 mvi->bulk_buffer, mvi->bulk_buffer_dma);
147#endif
148
149 MVS_CHIP_DISP->chip_iounmap(mvi);
150 if (mvi->shost)
151 scsi_host_put(mvi->shost);
152 list_for_each_entry(mwq, &mvi->wq_list, entry)
153 cancel_delayed_work(&mwq->work_q);
154 kfree(mvi);
155}
156
157#ifdef MVS_USE_TASKLET
158struct tasklet_struct mv_tasklet;
159static void mvs_tasklet(unsigned long opaque)
160{
161 unsigned long flags;
162 u32 stat;
163 u16 core_nr, i = 0;
164
165 struct mvs_info *mvi;
166 struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
167
168 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
169 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
170
171 if (unlikely(!mvi))
172 BUG_ON(1);
173
174 for (i = 0; i < core_nr; i++) {
175 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
176 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
177 if (stat)
178 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
179 }
180
181}
182#endif
183
184static irqreturn_t mvs_interrupt(int irq, void *opaque)
185{
186 u32 core_nr, i = 0;
187 u32 stat;
188 struct mvs_info *mvi;
189 struct sas_ha_struct *sha = opaque;
190
191 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
192 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
193
194 if (unlikely(!mvi))
195 return IRQ_NONE;
196
197 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
198 if (!stat)
199 return IRQ_NONE;
200
201#ifdef MVS_USE_TASKLET
202 tasklet_schedule(&mv_tasklet);
203#else
204 for (i = 0; i < core_nr; i++) {
205 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
206 MVS_CHIP_DISP->isr(mvi, irq, stat);
207 }
208#endif
209 return IRQ_HANDLED;
210}
211
212static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
213{
214 int i, slot_nr;
215
216 if (mvi->flags & MVF_FLAG_SOC)
217 slot_nr = MVS_SOC_SLOTS;
218 else
219 slot_nr = MVS_SLOTS;
220
221 spin_lock_init(&mvi->lock);
222 for (i = 0; i < mvi->chip->n_phy; i++) {
223 mvs_phy_init(mvi, i);
224 mvi->port[i].wide_port_phymap = 0;
225 mvi->port[i].port_attached = 0;
226 INIT_LIST_HEAD(&mvi->port[i].list);
227 }
228 for (i = 0; i < MVS_MAX_DEVICES; i++) {
229 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
230 mvi->devices[i].dev_type = NO_DEVICE;
231 mvi->devices[i].device_id = i;
232 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
233 }
234
235 /*
236 * alloc and init our DMA areas
237 */
238 mvi->tx = dma_alloc_coherent(mvi->dev,
239 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
240 &mvi->tx_dma, GFP_KERNEL);
241 if (!mvi->tx)
242 goto err_out;
243 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
244 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
245 &mvi->rx_fis_dma, GFP_KERNEL);
246 if (!mvi->rx_fis)
247 goto err_out;
248 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
249
250 mvi->rx = dma_alloc_coherent(mvi->dev,
251 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
252 &mvi->rx_dma, GFP_KERNEL);
253 if (!mvi->rx)
254 goto err_out;
255 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
256 mvi->rx[0] = cpu_to_le32(0xfff);
257 mvi->rx_cons = 0xfff;
258
259 mvi->slot = dma_alloc_coherent(mvi->dev,
260 sizeof(*mvi->slot) * slot_nr,
261 &mvi->slot_dma, GFP_KERNEL);
262 if (!mvi->slot)
263 goto err_out;
264 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
265
266#ifndef DISABLE_HOTPLUG_DMA_FIX
267 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 TRASH_BUCKET_SIZE,
269 &mvi->bulk_buffer_dma, GFP_KERNEL);
270 if (!mvi->bulk_buffer)
271 goto err_out;
272#endif
273 for (i = 0; i < slot_nr; i++) {
274 struct mvs_slot_info *slot = &mvi->slot_info[i];
275
276 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
277 &slot->buf_dma, GFP_KERNEL);
278 if (!slot->buf) {
279 printk(KERN_DEBUG"failed to allocate slot->buf.\n");
280 goto err_out;
281 }
282 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
283 ++mvi->tags_num;
284 }
285 /* Initialize tags */
286 mvs_tag_init(mvi);
287 return 0;
288err_out:
289 return 1;
290}
291
292
293int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
294{
295 unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
296 struct pci_dev *pdev = mvi->pdev;
297 if (bar_ex != -1) {
298 /*
299 * ioremap main and peripheral registers
300 */
301 res_start = pci_resource_start(pdev, bar_ex);
302 res_len = pci_resource_len(pdev, bar_ex);
303 if (!res_start || !res_len)
304 goto err_out;
305
306 res_flag_ex = pci_resource_flags(pdev, bar_ex);
307 if (res_flag_ex & IORESOURCE_MEM) {
308 if (res_flag_ex & IORESOURCE_CACHEABLE)
309 mvi->regs_ex = ioremap(res_start, res_len);
310 else
311 mvi->regs_ex = ioremap_nocache(res_start,
312 res_len);
313 } else
314 mvi->regs_ex = (void *)res_start;
315 if (!mvi->regs_ex)
316 goto err_out;
317 }
318
319 res_start = pci_resource_start(pdev, bar);
320 res_len = pci_resource_len(pdev, bar);
321 if (!res_start || !res_len)
322 goto err_out;
323
324 res_flag = pci_resource_flags(pdev, bar);
325 if (res_flag & IORESOURCE_CACHEABLE)
326 mvi->regs = ioremap(res_start, res_len);
327 else
328 mvi->regs = ioremap_nocache(res_start, res_len);
329
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
334 goto err_out;
335 }
336
337 return 0;
338err_out:
339 return -1;
340}
341
342void mvs_iounmap(void __iomem *regs)
343{
344 iounmap(regs);
345}
346
347static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
348 const struct pci_device_id *ent,
349 struct Scsi_Host *shost, unsigned int id)
350{
351 struct mvs_info *mvi;
352 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
353
354 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
355 GFP_KERNEL);
356 if (!mvi)
357 return NULL;
358
359 mvi->pdev = pdev;
360 mvi->dev = &pdev->dev;
361 mvi->chip_id = ent->driver_data;
362 mvi->chip = &mvs_chips[mvi->chip_id];
363 INIT_LIST_HEAD(&mvi->wq_list);
364 mvi->irq = pdev->irq;
365
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
368
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
372#ifdef MVS_USE_TASKLET
373 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
374#endif
375
376 if (MVS_CHIP_DISP->chip_ioremap(mvi))
377 goto err_out;
378 if (!mvs_alloc(mvi, shost))
379 return mvi;
380err_out:
381 mvs_free(mvi);
382 return NULL;
383}
384
385/* move to PCI layer or libata core? */
386static int pci_go_64(struct pci_dev *pdev)
387{
388 int rc;
389
390 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
391 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
392 if (rc) {
393 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
394 if (rc) {
395 dev_printk(KERN_ERR, &pdev->dev,
396 "64-bit DMA enable failed\n");
397 return rc;
398 }
399 }
400 } else {
401 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
402 if (rc) {
403 dev_printk(KERN_ERR, &pdev->dev,
404 "32-bit DMA enable failed\n");
405 return rc;
406 }
407 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
408 if (rc) {
409 dev_printk(KERN_ERR, &pdev->dev,
410 "32-bit consistent DMA enable failed\n");
411 return rc;
412 }
413 }
414
415 return rc;
416}
417
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
459static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
460 const struct mvs_chip_info *chip_info)
461{
462 int can_queue, i = 0, j = 0;
463 struct mvs_info *mvi = NULL;
464 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
465 unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
466
467 for (j = 0; j < nr_core; j++) {
468 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
469 for (i = 0; i < chip_info->n_phy; i++) {
470 sha->sas_phy[j * chip_info->n_phy + i] =
471 &mvi->phy[i].sas_phy;
472 sha->sas_port[j * chip_info->n_phy + i] =
473 &mvi->port[i].sas_port;
474 }
475 }
476
477 sha->sas_ha_name = DRV_NAME;
478 sha->dev = mvi->dev;
479 sha->lldd_module = THIS_MODULE;
480 sha->sas_addr = &mvi->sas_addr[0];
481
482 sha->num_phys = nr_core * chip_info->n_phy;
483
484 sha->lldd_max_execute_num = 1;
485
486 if (mvi->flags & MVF_FLAG_SOC)
487 can_queue = MVS_SOC_CAN_QUEUE;
488 else
489 can_queue = MVS_CAN_QUEUE;
490
491 sha->lldd_queue_size = can_queue;
492 shost->can_queue = can_queue;
493 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
494 sha->core.shost = mvi->shost;
495}
496
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
502 mvi->phy[i].dev_sas_addr =
503 cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
508
509static int __devinit mvs_pci_init(struct pci_dev *pdev,
510 const struct pci_device_id *ent)
511{
512 unsigned int rc, nhost = 0;
513 struct mvs_info *mvi;
514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
520 rc = pci_enable_device(pdev);
521 if (rc)
522 goto err_out_enable;
523
524 pci_set_master(pdev);
525
526 rc = pci_request_regions(pdev, DRV_NAME);
527 if (rc)
528 goto err_out_disable;
529
530 rc = pci_go_64(pdev);
531 if (rc)
532 goto err_out_regions;
533
534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
535 if (!shost) {
536 rc = -ENOMEM;
537 goto err_out_regions;
538 }
539
540 chip = &mvs_chips[ent->driver_data];
541 SHOST_TO_SAS_HA(shost) =
542 kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
543 if (!SHOST_TO_SAS_HA(shost)) {
544 kfree(shost);
545 rc = -ENOMEM;
546 goto err_out_regions;
547 }
548
549 rc = mvs_prep_sas_ha_init(shost, chip);
550 if (rc) {
551 kfree(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
555
556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
557
558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
579 if (rc)
580 goto err_out_shost;
581
582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
583 if (rc)
584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
589
590 MVS_CHIP_DISP->interrupt_enable(mvi);
591
592 scsi_scan_host(mvi->shost);
593
594 return 0;
595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
598err_out_shost:
599 scsi_remove_host(mvi->shost);
600err_out_regions:
601 pci_release_regions(pdev);
602err_out_disable:
603 pci_disable_device(pdev);
604err_out_enable:
605 return rc;
606}
607
608static void __devexit mvs_pci_remove(struct pci_dev *pdev)
609{
610 unsigned short core_nr, i = 0;
611 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
612 struct mvs_info *mvi = NULL;
613
614 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
615 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
616
617#ifdef MVS_USE_TASKLET
618 tasklet_kill(&mv_tasklet);
619#endif
620
621 pci_set_drvdata(pdev, NULL);
622 sas_unregister_ha(sha);
623 sas_remove_host(mvi->shost);
624 scsi_remove_host(mvi->shost);
625
626 MVS_CHIP_DISP->interrupt_disable(mvi);
627 free_irq(mvi->irq, sha);
628 for (i = 0; i < core_nr; i++) {
629 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
630 mvs_free(mvi);
631 }
632 kfree(sha->sas_phy);
633 kfree(sha->sas_port);
634 kfree(sha);
635 pci_release_regions(pdev);
636 pci_disable_device(pdev);
637 return;
638}
639
640static struct pci_device_id __devinitdata mvs_pci_table[] = {
641 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
642 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
643 {
644 .vendor = PCI_VENDOR_ID_MARVELL,
645 .device = 0x6440,
646 .subvendor = PCI_ANY_ID,
647 .subdevice = 0x6480,
648 .class = 0,
649 .class_mask = 0,
650 .driver_data = chip_6485,
651 },
652 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
656
657 { } /* terminate list */
658};
659
660static struct pci_driver mvs_pci_driver = {
661 .name = DRV_NAME,
662 .id_table = mvs_pci_table,
663 .probe = mvs_pci_init,
664 .remove = __devexit_p(mvs_pci_remove),
665};
666
667/* task handler */
668struct task_struct *mvs_th;
669static int __init mvs_init(void)
670{
671 int rc;
672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
673 if (!mvs_stt)
674 return -ENOMEM;
675
676 rc = pci_register_driver(&mvs_pci_driver);
677
678 if (rc)
679 goto err_out;
680
681 return 0;
682
683err_out:
684 sas_release_transport(mvs_stt);
685 return rc;
686}
687
688static void __exit mvs_exit(void)
689{
690 pci_unregister_driver(&mvs_pci_driver);
691 sas_release_transport(mvs_stt);
692}
693
694module_init(mvs_init);
695module_exit(mvs_exit);
696
697MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
698MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
699MODULE_VERSION(DRV_VERSION);
700MODULE_LICENSE("GPL");
701#ifdef CONFIG_PCI
702MODULE_DEVICE_TABLE(pci, mvs_pci_table);
703#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 00000000000..0d213864121
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26
27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
28{
29 if (task->lldd_task) {
30 struct mvs_slot_info *slot;
31 slot = task->lldd_task;
32 *tag = slot->slot_tag;
33 return 1;
34 }
35 return 0;
36}
37
38void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
39{
40 void *bitmap = &mvi->tags;
41 clear_bit(tag, bitmap);
42}
43
44void mvs_tag_free(struct mvs_info *mvi, u32 tag)
45{
46 mvs_tag_clear(mvi, tag);
47}
48
49void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
50{
51 void *bitmap = &mvi->tags;
52 set_bit(tag, bitmap);
53}
54
55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
56{
57 unsigned int index, tag;
58 void *bitmap = &mvi->tags;
59
60 index = find_first_zero_bit(bitmap, mvi->tags_num);
61 tag = index;
62 if (tag >= mvi->tags_num)
63 return -SAS_QUEUE_FULL;
64 mvs_tag_set(mvi, tag);
65 *tag_out = tag;
66 return 0;
67}
68
69void mvs_tag_init(struct mvs_info *mvi)
70{
71 int i;
72 for (i = 0; i < mvi->tags_num; ++i)
73 mvs_tag_clear(mvi, i);
74}
75
76void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
77{
78 u32 i;
79 u32 run;
80 u32 offset;
81
82 offset = 0;
83 while (size) {
84 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
85 if (size >= 16)
86 run = 16;
87 else
88 run = size;
89 size -= run;
90 for (i = 0; i < 16; i++) {
91 if (i < run)
92 printk(KERN_DEBUG"%02X ", (u32)data[i]);
93 else
94 printk(KERN_DEBUG" ");
95 }
96 printk(KERN_DEBUG": ");
97 for (i = 0; i < run; i++)
98 printk(KERN_DEBUG"%c",
99 isalnum(data[i]) ? data[i] : '.');
100 printk(KERN_DEBUG"\n");
101 data = &data[16];
102 offset += run;
103 }
104 printk(KERN_DEBUG"\n");
105}
106
107#if (_MV_DUMP > 1)
108static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
109 enum sas_protocol proto)
110{
111 u32 offset;
112 struct mvs_slot_info *slot = &mvi->slot_info[tag];
113
114 offset = slot->cmd_size + MVS_OAF_SZ +
115 MVS_CHIP_DISP->prd_size() * slot->n_elem;
116 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
117 tag);
118 mvs_hexdump(32, (u8 *) slot->response,
119 (u32) slot->buf_dma + offset);
120}
121#endif
122
123static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
124 enum sas_protocol proto)
125{
126#if (_MV_DUMP > 1)
127 u32 sz, w_ptr;
128 u64 addr;
129 struct mvs_slot_info *slot = &mvi->slot_info[tag];
130
131 /*Delivery Queue */
132 sz = MVS_CHIP_SLOT_SZ;
133 w_ptr = slot->tx;
134 addr = mvi->tx_dma;
135 dev_printk(KERN_DEBUG, mvi->dev,
136 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
137 dev_printk(KERN_DEBUG, mvi->dev,
138 "Delivery Queue Base Address=0x%llX (PA)"
139 "(tx_dma=0x%llX), Entry=%04d\n",
140 addr, (unsigned long long)mvi->tx_dma, w_ptr);
141 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
142 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
143 /*Command List */
144 addr = mvi->slot_dma;
145 dev_printk(KERN_DEBUG, mvi->dev,
146 "Command List Base Address=0x%llX (PA)"
147 "(slot_dma=0x%llX), Header=%03d\n",
148 addr, (unsigned long long)slot->buf_dma, tag);
149 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
150 /*mvs_cmd_hdr */
151 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
152 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
153 /*1.command table area */
154 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
155 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
156 /*2.open address frame area */
157 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
158 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
159 (u32) slot->buf_dma + slot->cmd_size);
160 /*3.status buffer */
161 mvs_hba_sb_dump(mvi, tag, proto);
162 /*4.PRD table */
163 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
164 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
165 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
166 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
167#endif
168}
169
170static void mvs_hba_cq_dump(struct mvs_info *mvi)
171{
172#if (_MV_DUMP > 2)
173 u64 addr;
174 void __iomem *regs = mvi->regs;
175 u32 entry = mvi->rx_cons + 1;
176 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
177
178 /*Completion Queue */
179 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
180 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
181 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
182 dev_printk(KERN_DEBUG, mvi->dev,
183 "Completion List Base Address=0x%llX (PA), "
184 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
185 addr, entry - 1, mvi->rx[0]);
186 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
187 mvi->rx_dma + sizeof(u32) * entry);
188#endif
189}
190
191void mvs_get_sas_addr(void *buf, u32 buflen)
192{
193 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
194}
195
196struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
197{
198 unsigned long i = 0, j = 0, hi = 0;
199 struct sas_ha_struct *sha = dev->port->ha;
200 struct mvs_info *mvi = NULL;
201 struct asd_sas_phy *phy;
202
203 while (sha->sas_port[i]) {
204 if (sha->sas_port[i] == dev->port) {
205 phy = container_of(sha->sas_port[i]->phy_list.next,
206 struct asd_sas_phy, port_phy_el);
207 j = 0;
208 while (sha->sas_phy[j]) {
209 if (sha->sas_phy[j] == phy)
210 break;
211 j++;
212 }
213 break;
214 }
215 i++;
216 }
217 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
218 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
219
220 return mvi;
221
222}
223
224/* FIXME */
225int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
226{
227 unsigned long i = 0, j = 0, n = 0, num = 0;
228 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
229 struct mvs_info *mvi = mvi_dev->mvi_info;
230 struct sas_ha_struct *sha = dev->port->ha;
231
232 while (sha->sas_port[i]) {
233 if (sha->sas_port[i] == dev->port) {
234 struct asd_sas_phy *phy;
235 list_for_each_entry(phy,
236 &sha->sas_port[i]->phy_list, port_phy_el) {
237 j = 0;
238 while (sha->sas_phy[j]) {
239 if (sha->sas_phy[j] == phy)
240 break;
241 j++;
242 }
243 phyno[n] = (j >= mvi->chip->n_phy) ?
244 (j - mvi->chip->n_phy) : j;
245 num++;
246 n++;
247 }
248 break;
249 }
250 i++;
251 }
252 return num;
253}
254
255static inline void mvs_free_reg_set(struct mvs_info *mvi,
256 struct mvs_device *dev)
257{
258 if (!dev) {
259		mv_printk("device has been freed.\n");
260 return;
261 }
262 if (dev->runing_req != 0)
263 return;
264 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
265 return;
266 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
267}
268
269static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
270 struct mvs_device *dev)
271{
272 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
273 return 0;
274 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
275}
276
277void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
278{
279 u32 no;
280 for_each_phy(phy_mask, phy_mask, no) {
281 if (!(phy_mask & 1))
282 continue;
283 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
284 }
285}
286
287/* FIXME: locking? */
288int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
289 void *funcdata)
290{
291 int rc = 0, phy_id = sas_phy->id;
292 u32 tmp, i = 0, hi;
293 struct sas_ha_struct *sha = sas_phy->ha;
294 struct mvs_info *mvi = NULL;
295
296 while (sha->sas_phy[i]) {
297 if (sha->sas_phy[i] == sas_phy)
298 break;
299 i++;
300 }
301 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
302 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
303
304 switch (func) {
305 case PHY_FUNC_SET_LINK_RATE:
306 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
307 break;
308
309 case PHY_FUNC_HARD_RESET:
310 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
311 if (tmp & PHY_RST_HARD)
312 break;
313 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
314 break;
315
316 case PHY_FUNC_LINK_RESET:
317 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
318 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
319 break;
320
321 case PHY_FUNC_DISABLE:
322 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
323 break;
324 case PHY_FUNC_RELEASE_SPINUP_HOLD:
325 default:
326 rc = -EOPNOTSUPP;
327 }
328 msleep(200);
329 return rc;
330}
331
332void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
333 u32 off_lo, u32 off_hi, u64 sas_addr)
334{
335 u32 lo = (u32)sas_addr;
336 u32 hi = (u32)(sas_addr>>32);
337
338 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
339 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
340 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
341 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
342}
343
344static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
345{
346 struct mvs_phy *phy = &mvi->phy[i];
347 struct asd_sas_phy *sas_phy = &phy->sas_phy;
348 struct sas_ha_struct *sas_ha;
349 if (!phy->phy_attached)
350 return;
351
352 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
353 && phy->phy_type & PORT_TYPE_SAS) {
354 return;
355 }
356
357 sas_ha = mvi->sas;
358 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
359
360 if (sas_phy->phy) {
361 struct sas_phy *sphy = sas_phy->phy;
362
363 sphy->negotiated_linkrate = sas_phy->linkrate;
364 sphy->minimum_linkrate = phy->minimum_linkrate;
365 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
366 sphy->maximum_linkrate = phy->maximum_linkrate;
367 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
368 }
369
370 if (phy->phy_type & PORT_TYPE_SAS) {
371 struct sas_identify_frame *id;
372
373 id = (struct sas_identify_frame *)phy->frame_rcvd;
374 id->dev_type = phy->identify.device_type;
375 id->initiator_bits = SAS_PROTOCOL_ALL;
376 id->target_bits = phy->identify.target_port_protocols;
377 } else if (phy->phy_type & PORT_TYPE_SATA) {
378 /*Nothing*/
379 }
380	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
381
382 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
383
384 mvi->sas->notify_port_event(sas_phy,
385 PORTE_BYTES_DMAED);
386}
387
388int mvs_slave_alloc(struct scsi_device *scsi_dev)
389{
390 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
391 if (dev_is_sata(dev)) {
392 /* We don't need to rescan targets
393		 * if the REPORT_LUNS request fails
394 */
395 if (scsi_dev->lun > 0)
396 return -ENXIO;
397 scsi_dev->tagged_supported = 1;
398 }
399
400 return sas_slave_alloc(scsi_dev);
401}
402
403int mvs_slave_configure(struct scsi_device *sdev)
404{
405 struct domain_device *dev = sdev_to_domain_dev(sdev);
406 int ret = sas_slave_configure(sdev);
407
408 if (ret)
409 return ret;
410 if (dev_is_sata(dev)) {
411 /* may set PIO mode */
412 #if MV_DISABLE_NCQ
413 struct ata_port *ap = dev->sata_dev.ap;
414 struct ata_device *adev = ap->link.device;
415 adev->flags |= ATA_DFLAG_NCQ_OFF;
416 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
417 #endif
418 }
419 return 0;
420}
421
422void mvs_scan_start(struct Scsi_Host *shost)
423{
424 int i, j;
425 unsigned short core_nr;
426 struct mvs_info *mvi;
427 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
428
429 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
430
431 for (j = 0; j < core_nr; j++) {
432 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
433 for (i = 0; i < mvi->chip->n_phy; ++i)
434 mvs_bytes_dmaed(mvi, i);
435 }
436}
437
438int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
439{
440 /* give the phy enabling interrupt event time to come in (1s
441 * is empirically about all it takes) */
442 if (time < HZ)
443 return 0;
444 /* Wait for discovery to finish */
445 scsi_flush_work(shost);
446 return 1;
447}
448
449static int mvs_task_prep_smp(struct mvs_info *mvi,
450 struct mvs_task_exec_info *tei)
451{
452 int elem, rc, i;
453 struct sas_task *task = tei->task;
454 struct mvs_cmd_hdr *hdr = tei->hdr;
455 struct domain_device *dev = task->dev;
456 struct asd_sas_port *sas_port = dev->port;
457 struct scatterlist *sg_req, *sg_resp;
458 u32 req_len, resp_len, tag = tei->tag;
459 void *buf_tmp;
460 u8 *buf_oaf;
461 dma_addr_t buf_tmp_dma;
462 void *buf_prd;
463 struct mvs_slot_info *slot = &mvi->slot_info[tag];
464 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
465#if _MV_DUMP
466 u8 *buf_cmd;
467 void *from;
468#endif
469 /*
470 * DMA-map SMP request, response buffers
471 */
472 sg_req = &task->smp_task.smp_req;
473 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
474 if (!elem)
475 return -ENOMEM;
476 req_len = sg_dma_len(sg_req);
477
478 sg_resp = &task->smp_task.smp_resp;
479 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
480 if (!elem) {
481 rc = -ENOMEM;
482 goto err_out;
483 }
484 resp_len = SB_RFB_MAX;
485
486 /* must be in dwords */
487 if ((req_len & 0x3) || (resp_len & 0x3)) {
488 rc = -EINVAL;
489 goto err_out_2;
490 }
491
492 /*
493 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
494 */
495
496 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
497 buf_tmp = slot->buf;
498 buf_tmp_dma = slot->buf_dma;
499
500#if _MV_DUMP
501 buf_cmd = buf_tmp;
502 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
503 buf_tmp += req_len;
504 buf_tmp_dma += req_len;
505 slot->cmd_size = req_len;
506#else
507 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
508#endif
509
510 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
511 buf_oaf = buf_tmp;
512 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
513
514 buf_tmp += MVS_OAF_SZ;
515 buf_tmp_dma += MVS_OAF_SZ;
516
517 /* region 3: PRD table *********************************** */
518 buf_prd = buf_tmp;
519 if (tei->n_elem)
520 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
521 else
522 hdr->prd_tbl = 0;
523
524 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
525 buf_tmp += i;
526 buf_tmp_dma += i;
527
528	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
529 slot->response = buf_tmp;
530 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
531 if (mvi->flags & MVF_FLAG_SOC)
532 hdr->reserved[0] = 0;
533
534 /*
535 * Fill in TX ring and command slot header
536 */
537 slot->tx = mvi->tx_prod;
538 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
539 TXQ_MODE_I | tag |
540 (sas_port->phy_mask << TXQ_PHY_SHIFT));
541
542 hdr->flags |= flags;
543 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
544 hdr->tags = cpu_to_le32(tag);
545 hdr->data_len = 0;
546
547 /* generate open address frame hdr (first 12 bytes) */
548 /* initiator, SMP, ftype 1h */
549 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
550 buf_oaf[1] = dev->linkrate & 0xf;
551 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
552 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
553
554 /* fill in PRD (scatter/gather) table, if any */
555 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
556
557#if _MV_DUMP
558 /* copy cmd table */
559 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
560 memcpy(buf_cmd, from + sg_req->offset, req_len);
561 kunmap_atomic(from, KM_IRQ0);
562#endif
563 return 0;
564
565err_out_2:
566 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
567 PCI_DMA_FROMDEVICE);
568err_out:
569 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
570 PCI_DMA_TODEVICE);
571 return rc;
572}
573
574static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
575{
576 struct ata_queued_cmd *qc = task->uldd_task;
577
578 if (qc) {
579 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
580 qc->tf.command == ATA_CMD_FPDMA_READ) {
581 *tag = qc->tag;
582 return 1;
583 }
584 }
585
586 return 0;
587}
588
589static int mvs_task_prep_ata(struct mvs_info *mvi,
590 struct mvs_task_exec_info *tei)
591{
592 struct sas_task *task = tei->task;
593 struct domain_device *dev = task->dev;
594 struct mvs_device *mvi_dev = dev->lldd_dev;
595 struct mvs_cmd_hdr *hdr = tei->hdr;
596 struct asd_sas_port *sas_port = dev->port;
597 struct mvs_slot_info *slot;
598 void *buf_prd;
599 u32 tag = tei->tag, hdr_tag;
600 u32 flags, del_q;
601 void *buf_tmp;
602 u8 *buf_cmd, *buf_oaf;
603 dma_addr_t buf_tmp_dma;
604 u32 i, req_len, resp_len;
605 const u32 max_resp_len = SB_RFB_MAX;
606
607 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
608		mv_dprintk("Not enough register sets for dev %d.\n",
609 mvi_dev->device_id);
610 return -EBUSY;
611 }
612 slot = &mvi->slot_info[tag];
613 slot->tx = mvi->tx_prod;
614 del_q = TXQ_MODE_I | tag |
615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
617 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
618 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
619
620#ifndef DISABLE_HOTPLUG_DMA_FIX
621 if (task->data_dir == DMA_FROM_DEVICE)
622 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
623 else
624 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
625#else
626 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
627#endif
628 if (task->ata_task.use_ncq)
629 flags |= MCH_FPDMA;
630 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
631 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
632 flags |= MCH_ATAPI;
633 }
634
635 /* FIXME: fill in port multiplier number */
636
637 hdr->flags = cpu_to_le32(flags);
638
639	/* FIXME: the low order 5 bits for the TAG if NCQ is enabled */
640 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
641 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
642 else
643 hdr_tag = tag;
644
645 hdr->tags = cpu_to_le32(hdr_tag);
646
647 hdr->data_len = cpu_to_le32(task->total_xfer_len);
648
649 /*
650 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
651 */
652
653 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
654 buf_cmd = buf_tmp = slot->buf;
655 buf_tmp_dma = slot->buf_dma;
656
657 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
658
659 buf_tmp += MVS_ATA_CMD_SZ;
660 buf_tmp_dma += MVS_ATA_CMD_SZ;
661#if _MV_DUMP
662 slot->cmd_size = MVS_ATA_CMD_SZ;
663#endif
664
665 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
666 /* used for STP. unused for SATA? */
667 buf_oaf = buf_tmp;
668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
669
670 buf_tmp += MVS_OAF_SZ;
671 buf_tmp_dma += MVS_OAF_SZ;
672
673 /* region 3: PRD table ********************************************* */
674 buf_prd = buf_tmp;
675
676 if (tei->n_elem)
677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
678 else
679 hdr->prd_tbl = 0;
680 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
681
682 buf_tmp += i;
683 buf_tmp_dma += i;
684
685	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
686 /* FIXME: probably unused, for SATA. kept here just in case
687 * we get a STP/SATA error information record
688 */
689 slot->response = buf_tmp;
690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
691 if (mvi->flags & MVF_FLAG_SOC)
692 hdr->reserved[0] = 0;
693
694 req_len = sizeof(struct host_to_dev_fis);
695 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
696 sizeof(struct mvs_err_info) - i;
697
698 /* request, response lengths */
699 resp_len = min(resp_len, max_resp_len);
700 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
701
702 if (likely(!task->ata_task.device_control_reg_update))
703 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
704 /* fill in command FIS and ATAPI CDB */
705 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
706 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
707 memcpy(buf_cmd + STP_ATAPI_CMD,
708 task->ata_task.atapi_packet, 16);
709
710 /* generate open address frame hdr (first 12 bytes) */
711 /* initiator, STP, ftype 1h */
712 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
713 buf_oaf[1] = dev->linkrate & 0xf;
714 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
715 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
716
717 /* fill in PRD (scatter/gather) table, if any */
718 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
719#ifndef DISABLE_HOTPLUG_DMA_FIX
720 if (task->data_dir == DMA_FROM_DEVICE)
721 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
722 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
723#endif
724 return 0;
725}
726
727static int mvs_task_prep_ssp(struct mvs_info *mvi,
728 struct mvs_task_exec_info *tei, int is_tmf,
729 struct mvs_tmf_task *tmf)
730{
731 struct sas_task *task = tei->task;
732 struct mvs_cmd_hdr *hdr = tei->hdr;
733 struct mvs_port *port = tei->port;
734 struct domain_device *dev = task->dev;
735 struct mvs_device *mvi_dev = dev->lldd_dev;
736 struct asd_sas_port *sas_port = dev->port;
737 struct mvs_slot_info *slot;
738 void *buf_prd;
739 struct ssp_frame_hdr *ssp_hdr;
740 void *buf_tmp;
741 u8 *buf_cmd, *buf_oaf, fburst = 0;
742 dma_addr_t buf_tmp_dma;
743 u32 flags;
744 u32 resp_len, req_len, i, tag = tei->tag;
745 const u32 max_resp_len = SB_RFB_MAX;
746 u32 phy_mask;
747
748 slot = &mvi->slot_info[tag];
749
750 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
751 sas_port->phy_mask) & TXQ_PHY_MASK;
752
753 slot->tx = mvi->tx_prod;
754 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
755 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
756 (phy_mask << TXQ_PHY_SHIFT));
757
758 flags = MCH_RETRY;
759 if (task->ssp_task.enable_first_burst) {
760 flags |= MCH_FBURST;
761 fburst = (1 << 7);
762 }
763 if (is_tmf)
764 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
765 else
766 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
767 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
768 hdr->tags = cpu_to_le32(tag);
769 hdr->data_len = cpu_to_le32(task->total_xfer_len);
770
771 /*
772 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
773 */
774
775 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
776 buf_cmd = buf_tmp = slot->buf;
777 buf_tmp_dma = slot->buf_dma;
778
779 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
780
781 buf_tmp += MVS_SSP_CMD_SZ;
782 buf_tmp_dma += MVS_SSP_CMD_SZ;
783#if _MV_DUMP
784 slot->cmd_size = MVS_SSP_CMD_SZ;
785#endif
786
787 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
788 buf_oaf = buf_tmp;
789 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
790
791 buf_tmp += MVS_OAF_SZ;
792 buf_tmp_dma += MVS_OAF_SZ;
793
794 /* region 3: PRD table ********************************************* */
795 buf_prd = buf_tmp;
796 if (tei->n_elem)
797 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
798 else
799 hdr->prd_tbl = 0;
800
801 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
802 buf_tmp += i;
803 buf_tmp_dma += i;
804
805	/* region 4: status buffer (the larger the PRD, the smaller this buf) ****** */
806 slot->response = buf_tmp;
807 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
808 if (mvi->flags & MVF_FLAG_SOC)
809 hdr->reserved[0] = 0;
810
811 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
812 sizeof(struct mvs_err_info) - i;
813 resp_len = min(resp_len, max_resp_len);
814
815 req_len = sizeof(struct ssp_frame_hdr) + 28;
816
817 /* request, response lengths */
818 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
819
820 /* generate open address frame hdr (first 12 bytes) */
821 /* initiator, SSP, ftype 1h */
822 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
823 buf_oaf[1] = dev->linkrate & 0xf;
824 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
825 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
826
827 /* fill in SSP frame header (Command Table.SSP frame header) */
828 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
829
830 if (is_tmf)
831 ssp_hdr->frame_type = SSP_TASK;
832 else
833 ssp_hdr->frame_type = SSP_COMMAND;
834
835 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
836 HASHED_SAS_ADDR_SIZE);
837 memcpy(ssp_hdr->hashed_src_addr,
838 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
839 ssp_hdr->tag = cpu_to_be16(tag);
840
841 /* fill in IU for TASK and Command Frame */
842 buf_cmd += sizeof(*ssp_hdr);
843 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
844
845 if (ssp_hdr->frame_type != SSP_TASK) {
846 buf_cmd[9] = fburst | task->ssp_task.task_attr |
847 (task->ssp_task.task_prio << 3);
848 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
849	} else {
850 buf_cmd[10] = tmf->tmf;
851 switch (tmf->tmf) {
852 case TMF_ABORT_TASK:
853 case TMF_QUERY_TASK:
854 buf_cmd[12] =
855 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
856 buf_cmd[13] =
857 tmf->tag_of_task_to_be_managed & 0xff;
858 break;
859 default:
860 break;
861 }
862 }
863 /* fill in PRD (scatter/gather) table, if any */
864 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
865 return 0;
866}
867
868#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
869static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
870 struct completion *completion,int is_tmf,
871 struct mvs_tmf_task *tmf)
872{
873 struct domain_device *dev = task->dev;
874 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
875 struct mvs_info *mvi = mvi_dev->mvi_info;
876 struct mvs_task_exec_info tei;
877 struct sas_task *t = task;
878 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0;
881 unsigned long flags = 0;
882
883 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status;
885
886 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t);
889 return 0;
890 }
891
892 spin_lock_irqsave(&mvi->lock, flags);
893 do {
894 dev = t->dev;
895 mvi_dev = dev->lldd_dev;
896 if (DEV_IS_GONE(mvi_dev)) {
897 if (mvi_dev)
898 mv_dprintk("device %d not ready.\n",
899 mvi_dev->device_id);
900 else
901 mv_dprintk("device %016llx not ready.\n",
902 SAS_ADDR(dev->sas_addr));
903
904 rc = SAS_PHY_DOWN;
905 goto out_done;
906 }
907
908 if (dev->port->id >= mvi->chip->n_phy)
909 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
910 else
911 tei.port = &mvi->port[dev->port->id];
912
913 if (!tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) {
915				mv_dprintk("port %d has no"
916					" attached device.\n", dev->port->id);
917 rc = SAS_PHY_DOWN;
918 goto out_done;
919 } else {
920 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED;
922 ts->stat = SAS_PHY_DOWN;
923 t->task_done(t);
924 if (n > 1)
925 t = list_entry(t->list.next,
926 struct sas_task, list);
927 continue;
928 }
929 }
930
931 if (!sas_protocol_ata(t->task_proto)) {
932 if (t->num_scatter) {
933 n_elem = dma_map_sg(mvi->dev,
934 t->scatter,
935 t->num_scatter,
936 t->data_dir);
937 if (!n_elem) {
938 rc = -ENOMEM;
939 goto err_out;
940 }
941 }
942 } else {
943 n_elem = t->num_scatter;
944 }
945
946 rc = mvs_tag_alloc(mvi, &tag);
947 if (rc)
948 goto err_out;
949
950 slot = &mvi->slot_info[tag];
951
952
953 t->lldd_task = NULL;
954 slot->n_elem = n_elem;
955 slot->slot_tag = tag;
956 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
957
958 tei.task = t;
959 tei.hdr = &mvi->slot[tag];
960 tei.tag = tag;
961 tei.n_elem = n_elem;
962 switch (t->task_proto) {
963 case SAS_PROTOCOL_SMP:
964 rc = mvs_task_prep_smp(mvi, &tei);
965 break;
966 case SAS_PROTOCOL_SSP:
967 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
968 break;
969 case SAS_PROTOCOL_SATA:
970 case SAS_PROTOCOL_STP:
971 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
972 rc = mvs_task_prep_ata(mvi, &tei);
973 break;
974 default:
975 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n",
977 t->task_proto);
978 rc = -EINVAL;
979 break;
980 }
981
982 if (rc) {
983 mv_dprintk("rc is %x\n", rc);
984 goto err_out_tag;
985 }
986 slot->task = t;
987 slot->port = tei.port;
988 t->lldd_task = slot;
989 list_add_tail(&slot->entry, &tei.port->list);
990 /* TODO: select normal or high priority */
991 spin_lock(&t->task_state_lock);
992 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
993 spin_unlock(&t->task_state_lock);
994
995 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++;
997 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list);
1001 } while (--n);
1002 rc = 0;
1003 goto out_done;
1004
1005err_out_tag:
1006 mvs_tag_free(mvi, tag);
1007err_out:
1008
1009 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1010 if (!sas_protocol_ata(t->task_proto))
1011 if (n_elem)
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir);
1014out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc;
1021}
1022
1023int mvs_queue_command(struct sas_task *task, const int num,
1024 gfp_t gfp_flags)
1025{
1026 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1027}
1028
1029static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1030{
1031 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1032 mvs_tag_clear(mvi, slot_idx);
1033}
1034
1035static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1036 struct mvs_slot_info *slot, u32 slot_idx)
1037{
1038 if (!slot->task)
1039 return;
1040 if (!sas_protocol_ata(task->task_proto))
1041 if (slot->n_elem)
1042 dma_unmap_sg(mvi->dev, task->scatter,
1043 slot->n_elem, task->data_dir);
1044
1045 switch (task->task_proto) {
1046 case SAS_PROTOCOL_SMP:
1047 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
1048 PCI_DMA_FROMDEVICE);
1049 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
1050 PCI_DMA_TODEVICE);
1051 break;
1052
1053 case SAS_PROTOCOL_SATA:
1054 case SAS_PROTOCOL_STP:
1055 case SAS_PROTOCOL_SSP:
1056 default:
1057 /* do nothing */
1058 break;
1059 }
1060 list_del_init(&slot->entry);
1061 task->lldd_task = NULL;
1062 slot->task = NULL;
1063 slot->port = NULL;
1064 slot->slot_tag = 0xFFFFFFFF;
1065 mvs_slot_free(mvi, slot_idx);
1066}
1067
1068static void mvs_update_wideport(struct mvs_info *mvi, int i)
1069{
1070 struct mvs_phy *phy = &mvi->phy[i];
1071 struct mvs_port *port = phy->port;
1072 int j, no;
1073
1074 for_each_phy(port->wide_port_phymap, j, no) {
1075 if (j & 1) {
1076 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1077 PHYR_WIDE_PORT);
1078 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1079 port->wide_port_phymap);
1080 } else {
1081 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1082 PHYR_WIDE_PORT);
1083 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1084 0);
1085 }
1086 }
1087}
1088
1089static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1090{
1091 u32 tmp;
1092 struct mvs_phy *phy = &mvi->phy[i];
1093 struct mvs_port *port = phy->port;
1094
1095 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1096 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1097 if (!port)
1098 phy->phy_attached = 1;
1099 return tmp;
1100 }
1101
1102 if (port) {
1103 if (phy->phy_type & PORT_TYPE_SAS) {
1104 port->wide_port_phymap &= ~(1U << i);
1105 if (!port->wide_port_phymap)
1106 port->port_attached = 0;
1107 mvs_update_wideport(mvi, i);
1108 } else if (phy->phy_type & PORT_TYPE_SATA)
1109 port->port_attached = 0;
1110 phy->port = NULL;
1111 phy->phy_attached = 0;
1112 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1113 }
1114 return 0;
1115}
1116
1117static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1118{
1119 u32 *s = (u32 *) buf;
1120
1121 if (!s)
1122 return NULL;
1123
1124 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1125 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1126
1127 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1128 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1129
1130 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1131 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1132
1133 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1134 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1135
1136	/* Workaround: treat some ATAPI devices as ATA */
1137 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1138 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1139
1140 return s;
1141}
1142
1143static u32 mvs_is_sig_fis_received(u32 irq_status)
1144{
1145 return irq_status & PHYEV_SIG_FIS;
1146}
1147
1148void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1149{
1150 struct mvs_phy *phy = &mvi->phy[i];
1151 struct sas_identify_frame *id;
1152
1153 id = (struct sas_identify_frame *)phy->frame_rcvd;
1154
1155 if (get_st) {
1156 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1157 phy->phy_status = mvs_is_phy_ready(mvi, i);
1158 }
1159
1160 if (phy->phy_status) {
1161 int oob_done = 0;
1162 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1163
1164 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1165
1166 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1167 if (phy->phy_type & PORT_TYPE_SATA) {
1168 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1169 if (mvs_is_sig_fis_received(phy->irq_status)) {
1170 phy->phy_attached = 1;
1171 phy->att_dev_sas_addr =
1172 i + mvi->id * mvi->chip->n_phy;
1173 if (oob_done)
1174 sas_phy->oob_mode = SATA_OOB_MODE;
1175 phy->frame_rcvd_size =
1176 sizeof(struct dev_to_host_fis);
1177 mvs_get_d2h_reg(mvi, i, id);
1178 } else {
1179 u32 tmp;
1180 dev_printk(KERN_DEBUG, mvi->dev,
1181 "Phy%d : No sig fis\n", i);
1182 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1183 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1184 tmp | PHYEV_SIG_FIS);
1185 phy->phy_attached = 0;
1186 phy->phy_type &= ~PORT_TYPE_SATA;
1187 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1188 goto out_done;
1189 }
1190 } else if (phy->phy_type & PORT_TYPE_SAS
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1;
1193 phy->identify.device_type =
1194 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1195
1196 if (phy->identify.device_type == SAS_END_DEV)
1197 phy->identify.target_port_protocols =
1198 SAS_PROTOCOL_SSP;
1199 else if (phy->identify.device_type != NO_DEVICE)
1200 phy->identify.target_port_protocols =
1201 SAS_PROTOCOL_SMP;
1202 if (oob_done)
1203 sas_phy->oob_mode = SAS_OOB_MODE;
1204 phy->frame_rcvd_size =
1205 sizeof(struct sas_identify_frame);
1206 }
1207 memcpy(sas_phy->attached_sas_addr,
1208 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1209
1210 if (MVS_CHIP_DISP->phy_work_around)
1211 MVS_CHIP_DISP->phy_work_around(mvi, i);
1212 }
1213	mv_dprintk("port %d attached dev info is %x\n",
1214		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1215	mv_dprintk("port %d attached sas addr is %llx\n",
1216		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1217out_done:
1218 if (get_st)
1219 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1220}
1221
1222static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1223{
1224 struct sas_ha_struct *sas_ha = sas_phy->ha;
1225 struct mvs_info *mvi = NULL; int i = 0, hi;
1226 struct mvs_phy *phy = sas_phy->lldd_phy;
1227 struct asd_sas_port *sas_port = sas_phy->port;
1228 struct mvs_port *port;
1229 unsigned long flags = 0;
1230 if (!sas_port)
1231 return;
1232
1233 while (sas_ha->sas_phy[i]) {
1234 if (sas_ha->sas_phy[i] == sas_phy)
1235 break;
1236 i++;
1237 }
1238 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1239 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1240 if (sas_port->id >= mvi->chip->n_phy)
1241 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1242 else
1243 port = &mvi->port[sas_port->id];
1244 if (lock)
1245 spin_lock_irqsave(&mvi->lock, flags);
1246 port->port_attached = 1;
1247 phy->port = port;
1248 if (phy->phy_type & PORT_TYPE_SAS) {
1249 port->wide_port_phymap = sas_port->phy_mask;
1250 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1251 mvs_update_wideport(mvi, sas_phy->id);
1252 }
1253 if (lock)
1254 spin_unlock_irqrestore(&mvi->lock, flags);
1255}
1256
1257static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1258{
1259 /*Nothing*/
1260}
1261
1262
1263void mvs_port_formed(struct asd_sas_phy *sas_phy)
1264{
1265 mvs_port_notify_formed(sas_phy, 1);
1266}
1267
1268void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1269{
1270 mvs_port_notify_deformed(sas_phy, 1);
1271}
1272
1273struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1274{
1275 u32 dev;
1276 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1277 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1278 mvi->devices[dev].device_id = dev;
1279 return &mvi->devices[dev];
1280 }
1281 }
1282
1283 if (dev == MVS_MAX_DEVICES)
1284		mv_printk("supports at most %d devices, ignoring.\n",
1285 MVS_MAX_DEVICES);
1286
1287 return NULL;
1288}
1289
1290void mvs_free_dev(struct mvs_device *mvi_dev)
1291{
1292 u32 id = mvi_dev->device_id;
1293 memset(mvi_dev, 0, sizeof(*mvi_dev));
1294 mvi_dev->device_id = id;
1295 mvi_dev->dev_type = NO_DEVICE;
1296 mvi_dev->dev_status = MVS_DEV_NORMAL;
1297 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1298}
1299
1300int mvs_dev_found_notify(struct domain_device *dev, int lock)
1301{
1302 unsigned long flags = 0;
1303 int res = 0;
1304 struct mvs_info *mvi = NULL;
1305 struct domain_device *parent_dev = dev->parent;
1306 struct mvs_device *mvi_device;
1307
1308 mvi = mvs_find_dev_mvi(dev);
1309
1310 if (lock)
1311 spin_lock_irqsave(&mvi->lock, flags);
1312
1313 mvi_device = mvs_alloc_dev(mvi);
1314 if (!mvi_device) {
1315 res = -1;
1316 goto found_out;
1317 }
1318 dev->lldd_dev = mvi_device;
1319 mvi_device->dev_type = dev->dev_type;
1320 mvi_device->mvi_info = mvi;
1321 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1322 int phy_id;
1323 u8 phy_num = parent_dev->ex_dev.num_phys;
1324 struct ex_phy *phy;
1325 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1326 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1327 if (SAS_ADDR(phy->attached_sas_addr) ==
1328 SAS_ADDR(dev->sas_addr)) {
1329 mvi_device->attached_phy = phy_id;
1330 break;
1331 }
1332 }
1333
1334 if (phy_id == phy_num) {
1335 mv_printk("Error: no attached dev:%016llx"
1336 "at ex:%016llx.\n",
1337 SAS_ADDR(dev->sas_addr),
1338 SAS_ADDR(parent_dev->sas_addr));
1339 res = -1;
1340 }
1341 }
1342
1343found_out:
1344 if (lock)
1345 spin_unlock_irqrestore(&mvi->lock, flags);
1346 return res;
1347}
1348
1349int mvs_dev_found(struct domain_device *dev)
1350{
1351 return mvs_dev_found_notify(dev, 1);
1352}
1353
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1355{
1356 unsigned long flags = 0;
1357 struct mvs_device *mvi_dev = dev->lldd_dev;
1358 struct mvs_info *mvi = mvi_dev->mvi_info;
1359
1360 if (lock)
1361 spin_lock_irqsave(&mvi->lock, flags);
1362
1363 if (mvi_dev) {
1364 mv_dprintk("found dev[%d:%x] is gone.\n",
1365 mvi_dev->device_id, mvi_dev->dev_type);
1366 mvs_free_reg_set(mvi, mvi_dev);
1367 mvs_free_dev(mvi_dev);
1368 } else {
1369		mv_dprintk("device already gone.\n");
1370 }
1371 dev->lldd_dev = NULL;
1372
1373 if (lock)
1374 spin_unlock_irqrestore(&mvi->lock, flags);
1375}
1376
1377
1378void mvs_dev_gone(struct domain_device *dev)
1379{
1380 mvs_dev_gone_notify(dev, 1);
1381}
1382
1383static struct sas_task *mvs_alloc_task(void)
1384{
1385 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1386
1387 if (task) {
1388 INIT_LIST_HEAD(&task->list);
1389 spin_lock_init(&task->task_state_lock);
1390 task->task_state_flags = SAS_TASK_STATE_PENDING;
1391 init_timer(&task->timer);
1392 init_completion(&task->completion);
1393 }
1394 return task;
1395}
1396
1397static void mvs_free_task(struct sas_task *task)
1398{
1399 if (task) {
1400 BUG_ON(!list_empty(&task->list));
1401 kfree(task);
1402 }
1403}
1404
1405static void mvs_task_done(struct sas_task *task)
1406{
1407 if (!del_timer(&task->timer))
1408 return;
1409 complete(&task->completion);
1410}
1411
1412static void mvs_tmf_timedout(unsigned long data)
1413{
1414 struct sas_task *task = (struct sas_task *)data;
1415
1416 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1417 complete(&task->completion);
1418}
1419
1420/* XXX */
1421#define MVS_TASK_TIMEOUT 20
1422static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1423 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1424{
1425 int res, retry;
1426 struct sas_task *task = NULL;
1427
1428 for (retry = 0; retry < 3; retry++) {
1429 task = mvs_alloc_task();
1430 if (!task)
1431 return -ENOMEM;
1432
1433 task->dev = dev;
1434 task->task_proto = dev->tproto;
1435
1436 memcpy(&task->ssp_task, parameter, para_len);
1437 task->task_done = mvs_task_done;
1438
1439 task->timer.data = (unsigned long) task;
1440 task->timer.function = mvs_tmf_timedout;
1441 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1442 add_timer(&task->timer);
1443
1444 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1445
1446 if (res) {
1447 del_timer(&task->timer);
1448 mv_printk("executing internel task failed:%d\n", res);
1449 goto ex_err;
1450 }
1451
1452 wait_for_completion(&task->completion);
1453 res = -TMF_RESP_FUNC_FAILED;
1454 		/* Even if the TMF timed out, return directly rather than retrying. */
1455 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1456 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1457 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1458 goto ex_err;
1459 }
1460 }
1461
1462 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1463 task->task_status.stat == SAM_GOOD) {
1464 res = TMF_RESP_FUNC_COMPLETE;
1465 break;
1466 }
1467
1468 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1469 task->task_status.stat == SAS_DATA_UNDERRUN) {
1470 /* no error, but return the number of bytes of
1471 * underrun */
1472 res = task->task_status.residual;
1473 break;
1474 }
1475
1476 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1477 task->task_status.stat == SAS_DATA_OVERRUN) {
1478 mv_dprintk("blocked task error.\n");
1479 res = -EMSGSIZE;
1480 break;
1481 } else {
1482 mv_dprintk(" task to dev %016llx response: 0x%x "
1483 "status 0x%x\n",
1484 SAS_ADDR(dev->sas_addr),
1485 task->task_status.resp,
1486 task->task_status.stat);
1487 mvs_free_task(task);
1488 task = NULL;
1489
1490 }
1491 }
1492ex_err:
1493 BUG_ON(retry == 3 && task != NULL);
1494 if (task != NULL)
1495 mvs_free_task(task);
1496 return res;
1497}
1498
1499static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1500 u8 *lun, struct mvs_tmf_task *tmf)
1501{
1502 struct sas_ssp_task ssp_task;
1503 DECLARE_COMPLETION_ONSTACK(completion);
1504 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1505 return TMF_RESP_FUNC_ESUPP;
1506
1507 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1508
1509 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1510 sizeof(ssp_task), tmf);
1511}
1512
1513
1514/* The standard mandates a link reset for ATA (type 0) and a hard
1515   reset for SSP (type 1); used only for RECOVERY */
1516static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1517{
1518 int rc;
1519 struct sas_phy *phy = sas_find_local_phy(dev);
1520 int reset_type = (dev->dev_type == SATA_DEV ||
1521 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1522 rc = sas_phy_reset(phy, reset_type);
1523 msleep(2000);
1524 return rc;
1525}
1526
1527/* mandatory SAM-3 */
1528int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1529{
1530 unsigned long flags;
1531 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1532 struct mvs_tmf_task tmf_task;
1533 struct mvs_device * mvi_dev = dev->lldd_dev;
1534 struct mvs_info *mvi = mvi_dev->mvi_info;
1535
1536 tmf_task.tmf = TMF_LU_RESET;
1537 mvi_dev->dev_status = MVS_DEV_EH;
1538 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1539 if (rc == TMF_RESP_FUNC_COMPLETE) {
1540 num = mvs_find_dev_phyno(dev, phyno);
1541 spin_lock_irqsave(&mvi->lock, flags);
1542 for (i = 0; i < num; i++)
1543 mvs_release_task(mvi, phyno[i], dev);
1544 spin_unlock_irqrestore(&mvi->lock, flags);
1545 }
1546 	/* If this fails, fall through to I_T nexus reset */
1547 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1548 mvi_dev->device_id, rc);
1549 return rc;
1550}
1551
1552int mvs_I_T_nexus_reset(struct domain_device *dev)
1553{
1554 unsigned long flags;
1555 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
1556 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1557 struct mvs_info *mvi = mvi_dev->mvi_info;
1558
1559 if (mvi_dev->dev_status != MVS_DEV_EH)
1560 return TMF_RESP_FUNC_COMPLETE;
1561 rc = mvs_debug_I_T_nexus_reset(dev);
1562 mv_printk("%s for device[%x]:rc= %d\n",
1563 __func__, mvi_dev->device_id, rc);
1564
1565 	/* housekeeping: release any tasks still outstanding for this device */
1566 num = mvs_find_dev_phyno(dev, phyno);
1567 spin_lock_irqsave(&mvi->lock, flags);
1568 for (i = 0; i < num; i++)
1569 mvs_release_task(mvi, phyno[i], dev);
1570 spin_unlock_irqrestore(&mvi->lock, flags);
1571
1572 return rc;
1573}
1574/* optional SAM-3 */
1575int mvs_query_task(struct sas_task *task)
1576{
1577 u32 tag;
1578 struct scsi_lun lun;
1579 struct mvs_tmf_task tmf_task;
1580 int rc = TMF_RESP_FUNC_FAILED;
1581
1582 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1583 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1584 struct domain_device *dev = task->dev;
1585 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1586 struct mvs_info *mvi = mvi_dev->mvi_info;
1587
1588 int_to_scsilun(cmnd->device->lun, &lun);
1589 rc = mvs_find_tag(mvi, task, &tag);
1590 if (rc == 0) {
1591 rc = TMF_RESP_FUNC_FAILED;
1592 return rc;
1593 }
1594
1595 tmf_task.tmf = TMF_QUERY_TASK;
1596 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1597
1598 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1599 switch (rc) {
1600 		/* The task is still in the LUN; it will need to be released */
1601 case TMF_RESP_FUNC_SUCC:
1602 		/* The task is not in the LUN or the query failed; reset the phy */
1603 case TMF_RESP_FUNC_FAILED:
1604 case TMF_RESP_FUNC_COMPLETE:
1605 break;
1606 }
1607 }
1608 mv_printk("%s:rc= %d\n", __func__, rc);
1609 return rc;
1610}
1611
1612/* mandatory SAM-3; still needs to free the task/slot info */
1613int mvs_abort_task(struct sas_task *task)
1614{
1615 struct scsi_lun lun;
1616 struct mvs_tmf_task tmf_task;
1617 struct domain_device *dev = task->dev;
1618 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1619 struct mvs_info *mvi = mvi_dev->mvi_info;
1620 int rc = TMF_RESP_FUNC_FAILED;
1621 unsigned long flags;
1622 u32 tag;
1623
1624 if (mvi->exp_req)
1625 mvi->exp_req--;
1626 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags);
1629 rc = TMF_RESP_FUNC_COMPLETE;
1630 goto out;
1631 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags);
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1635
1636 int_to_scsilun(cmnd->device->lun, &lun);
1637 rc = mvs_find_tag(mvi, task, &tag);
1638 if (rc == 0) {
1639 mv_printk("No such tag in %s\n", __func__);
1640 rc = TMF_RESP_FUNC_FAILED;
1641 return rc;
1642 }
1643
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1646
1647 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1648
1649 		/* if the abort succeeded, complete the slot and invoke the callback. */
1650 if (rc == TMF_RESP_FUNC_COMPLETE) {
1651 u32 slot_no;
1652 struct mvs_slot_info *slot;
1653
1654 if (task->lldd_task) {
1655 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info);
1657 mvs_slot_complete(mvi, slot_no, 1);
1658 }
1659 }
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) {
1662 		/* TODO: free the register set */
1663 } else {
1664 /* SMP */
1665
1666 }
1667out:
1668 if (rc != TMF_RESP_FUNC_COMPLETE)
1669 mv_printk("%s:rc= %d\n", __func__, rc);
1670 return rc;
1671}
1672
1673int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1674{
1675 int rc = TMF_RESP_FUNC_FAILED;
1676 struct mvs_tmf_task tmf_task;
1677
1678 tmf_task.tmf = TMF_ABORT_TASK_SET;
1679 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1680
1681 return rc;
1682}
1683
1684int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1685{
1686 int rc = TMF_RESP_FUNC_FAILED;
1687 struct mvs_tmf_task tmf_task;
1688
1689 tmf_task.tmf = TMF_CLEAR_ACA;
1690 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1691
1692 return rc;
1693}
1694
1695int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1696{
1697 int rc = TMF_RESP_FUNC_FAILED;
1698 struct mvs_tmf_task tmf_task;
1699
1700 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1701 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1702
1703 return rc;
1704}
1705
1706static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1707 u32 slot_idx, int err)
1708{
1709 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1710 struct task_status_struct *tstat = &task->task_status;
1711 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1712 int stat = SAM_GOOD;
1713
1714
1715 resp->frame_len = sizeof(struct dev_to_host_fis);
1716 memcpy(&resp->ending_fis[0],
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err))
1721 stat = SAS_PROTO_RESPONSE;
1722 return stat;
1723}
1724
1725static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1726 u32 slot_idx)
1727{
1728 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1729 int stat;
1730 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1731 u32 tfs = 0;
1732 enum mvs_port_type type = PORT_TYPE_SAS;
1733
1734 if (err_dw0 & CMD_ISS_STPD)
1735 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1736
1737 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1738
1739 stat = SAM_CHECK_COND;
1740 switch (task->task_proto) {
1741 case SAS_PROTOCOL_SSP:
1742 stat = SAS_ABORTED_TASK;
1743 break;
1744 case SAS_PROTOCOL_SMP:
1745 stat = SAM_CHECK_COND;
1746 break;
1747
1748 case SAS_PROTOCOL_SATA:
1749 case SAS_PROTOCOL_STP:
1750 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1751 {
1752 if (err_dw0 == 0x80400002)
1753 			mv_printk("found reserved error 0x80400002, reason unknown\n");
1754
1755 task->ata_task.use_ncq = 0;
1756 stat = SAS_PROTO_RESPONSE;
1757 mvs_sata_done(mvi, task, slot_idx, 1);
1758
1759 }
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 return stat;
1766}
1767
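/* Completion path for one RX descriptor: decode the slot index, translate
 * the hardware result into a libsas task status, free the slot and invoke
 * the task's completion callback.
 */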
1768int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1769{
1770 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1771 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1772 struct sas_task *task = slot->task;
1773 struct mvs_device *mvi_dev = NULL;
1774 struct task_status_struct *tstat;
1775
1776 bool aborted;
1777 void *to;
1778 enum exec_status sts;
1779
1780 if (mvi->exp_req)
1781 mvi->exp_req--;
1782 if (unlikely(!task || !task->lldd_task))
1783 return -1;
1784
1785 tstat = &task->task_status;
1786 mvi_dev = task->dev->lldd_dev;
1787
1788 mvs_hba_cq_dump(mvi);
1789
1790 spin_lock(&task->task_state_lock);
1791 task->task_state_flags &=
1792 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1793 task->task_state_flags |= SAS_TASK_STATE_DONE;
1794 	/* race condition */
1795 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1796 spin_unlock(&task->task_state_lock);
1797
1798 memset(tstat, 0, sizeof(*tstat));
1799 tstat->resp = SAS_TASK_COMPLETE;
1800
1801 if (unlikely(aborted)) {
1802 tstat->stat = SAS_ABORTED_TASK;
1803 if (mvi_dev)
1804 mvi_dev->runing_req--;
1805 if (sas_protocol_ata(task->task_proto))
1806 mvs_free_reg_set(mvi, mvi_dev);
1807
1808 mvs_slot_task_free(mvi, task, slot, slot_idx);
1809 return -1;
1810 }
1811
1812 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
1813 		mv_dprintk("port has no device attached.\n");
1814 tstat->stat = SAS_PHY_DOWN;
1815 goto out;
1816 }
1817
1818 /*
1819 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1820 mv_dprintk("Find device[%016llx] RXQ_ERR %X,
1821 err info:%016llx\n",
1822 SAS_ADDR(task->dev->sas_addr),
1823 rx_desc, (u64)(*(u64 *) slot->response));
1824 }
1825 */
1826
1827 /* error info record present */
1828 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1829 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1830 goto out;
1831 }
1832
1833 switch (task->task_proto) {
1834 case SAS_PROTOCOL_SSP:
1835 /* hw says status == 0, datapres == 0 */
1836 if (rx_desc & RXQ_GOOD) {
1837 tstat->stat = SAM_GOOD;
1838 tstat->resp = SAS_TASK_COMPLETE;
1839 }
1840 /* response frame present */
1841 else if (rx_desc & RXQ_RSP) {
1842 struct ssp_response_iu *iu = slot->response +
1843 sizeof(struct mvs_err_info);
1844 sas_ssp_task_response(mvi->dev, task, iu);
1845 } else
1846 tstat->stat = SAM_CHECK_COND;
1847 break;
1848
1849 case SAS_PROTOCOL_SMP: {
1850 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1851 tstat->stat = SAM_GOOD;
1852 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1853 memcpy(to + sg_resp->offset,
1854 slot->response + sizeof(struct mvs_err_info),
1855 sg_dma_len(sg_resp));
1856 kunmap_atomic(to, KM_IRQ0);
1857 break;
1858 }
1859
1860 case SAS_PROTOCOL_SATA:
1861 case SAS_PROTOCOL_STP:
1862 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1863 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1864 break;
1865 }
1866
1867 default:
1868 tstat->stat = SAM_CHECK_COND;
1869 break;
1870 }
1871
1872out:
1873 if (mvi_dev) {
1874 mvi_dev->runing_req--;
1875 if (sas_protocol_ata(task->task_proto))
1876 mvs_free_reg_set(mvi, mvi_dev);
1877 }
1878 mvs_slot_task_free(mvi, task, slot, slot_idx);
1879 sts = tstat->stat;
1880
1881 spin_unlock(&mvi->lock);
1882 if (task->task_done)
1883 task->task_done(task);
1884 else
1885 		mv_dprintk("task has no task_done callback.\n");
1886 spin_lock(&mvi->lock);
1887
1888 return sts;
1889}
1890
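/* Force-complete every slot still queued on the given phy's port,
 * optionally restricted to a single domain_device.
 */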
1891void mvs_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev)
1893{
1894 int i = 0; u32 slot_idx;
1895 struct mvs_phy *phy;
1896 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2;
1898
1899 phy = &mvi->phy[phy_no];
1900 port = phy->port;
1901 if (!port)
1902 return;
1903
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task;
1906 slot_idx = (u32) (slot - mvi->slot_info);
1907 task = slot->task;
1908
1909 if (dev && task->dev != dev)
1910 continue;
1911
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task);
1914
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921
1922 mvs_slot_complete(mvi, slot_idx, 1);
1923 }
1924}
1925
1926static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{
1928 phy->phy_attached = 0;
1929 phy->att_dev_info = 0;
1930 phy->att_dev_sas_addr = 0;
1931}
1932
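/* Deferred work handler: processes the PHY plug in/out events queued by
 * mvs_handle_event(), under mvi->lock.
 */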
1933static void mvs_work_queue(struct work_struct *work)
1934{
1935 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1936 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1937 struct mvs_info *mvi = mwq->mvi;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&mvi->lock, flags);
1941 if (mwq->handler & PHY_PLUG_EVENT) {
1942 u32 phy_no = (unsigned long) mwq->data;
1943 struct sas_ha_struct *sas_ha = mvi->sas;
1944 struct mvs_phy *phy = &mvi->phy[phy_no];
1945 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1946
1947 if (phy->phy_event & PHY_PLUG_OUT) {
1948 u32 tmp;
1949 struct sas_identify_frame *id;
1950 id = (struct sas_identify_frame *)phy->frame_rcvd;
1951 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1952 phy->phy_event &= ~PHY_PLUG_OUT;
1953 if (!(tmp & PHY_READY_MASK)) {
1954 sas_phy_disconnected(sas_phy);
1955 mvs_phy_disconnected(phy);
1956 sas_ha->notify_phy_event(sas_phy,
1957 PHYE_LOSS_OF_SIGNAL);
1958 mv_dprintk("phy%d Removed Device\n", phy_no);
1959 } else {
1960 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1961 mvs_update_phyinfo(mvi, phy_no, 1);
1962 mvs_bytes_dmaed(mvi, phy_no);
1963 mvs_port_notify_formed(sas_phy, 0);
1964 mv_dprintk("phy%d Attached Device\n", phy_no);
1965 }
1966 }
1967 }
1968 list_del(&mwq->entry);
1969 spin_unlock_irqrestore(&mvi->lock, flags);
1970 kfree(mwq);
1971}
1972
1973static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1974{
1975 struct mvs_wq *mwq;
1976 int ret = 0;
1977
1978 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1979 if (mwq) {
1980 mwq->mvi = mvi;
1981 mwq->data = data;
1982 mwq->handler = handler;
1983 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1984 list_add_tail(&mwq->entry, &mvi->wq_list);
1985 schedule_delayed_work(&mwq->work_q, HZ * 2);
1986 } else
1987 ret = -ENOMEM;
1988
1989 return ret;
1990}
1991
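/* Timer callback: no signature FIS was received within 10 seconds of
 * COMWAKE (see mvs_int_port()), so reset the offending phy.
 */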
1992static void mvs_sig_time_out(unsigned long tphy)
1993{
1994 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1995 struct mvs_info *mvi = phy->mvi;
1996 u8 phy_no;
1997
1998 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
1999 if (&mvi->phy[phy_no] == phy) {
2000 mv_dprintk("Get signature time out, reset phy %d\n",
2001 phy_no+mvi->id*mvi->chip->n_phy);
2002 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2003 }
2004 }
2005}
2006
2007static void mvs_sig_remove_timer(struct mvs_phy *phy)
2008{
2009 if (phy->timer.function)
2010 del_timer(&phy->timer);
2011 phy->timer.function = NULL;
2012}
2013
2014void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2015{
2016 u32 tmp;
2017 struct sas_ha_struct *sas_ha = mvi->sas;
2018 struct mvs_phy *phy = &mvi->phy[phy_no];
2019 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2020
2021 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2022 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2023 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2024 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2025 phy->irq_status);
2026
2027 /*
2028	 * "events" now carries a port event; we still need to check the
2029	 * per-port interrupt status to see what actually happened.
2030 */
2031
2032 if (phy->irq_status & PHYEV_DCDR_ERR)
2033 mv_dprintk("port %d STP decoding error.\n",
2034 phy_no+mvi->id*mvi->chip->n_phy);
2035
2036 if (phy->irq_status & PHYEV_POOF) {
2037 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2038 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2039 int ready;
2040 mvs_release_task(mvi, phy_no, NULL);
2041 phy->phy_event |= PHY_PLUG_OUT;
2042 mvs_handle_event(mvi,
2043 (void *)(unsigned long)phy_no,
2044 PHY_PLUG_EVENT);
2045 ready = mvs_is_phy_ready(mvi, phy_no);
2046 if (!ready)
2047 mv_dprintk("phy%d Unplug Notice\n",
2048 phy_no +
2049 mvi->id * mvi->chip->n_phy);
2050 if (ready || dev_sata) {
2051 if (MVS_CHIP_DISP->stp_reset)
2052 MVS_CHIP_DISP->stp_reset(mvi,
2053 phy_no);
2054 else
2055 MVS_CHIP_DISP->phy_reset(mvi,
2056 phy_no, 0);
2057 return;
2058 }
2059 }
2060 }
2061
2062 if (phy->irq_status & PHYEV_COMWAKE) {
2063 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2064 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2065 tmp | PHYEV_SIG_FIS);
2066 if (phy->timer.function == NULL) {
2067 phy->timer.data = (unsigned long)phy;
2068 phy->timer.function = mvs_sig_time_out;
2069 phy->timer.expires = jiffies + 10*HZ;
2070 add_timer(&phy->timer);
2071 }
2072 }
2073 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2074 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2075 mvs_sig_remove_timer(phy);
2076 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2077 if (phy->phy_status) {
2078 mdelay(10);
2079 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2080 if (phy->phy_type & PORT_TYPE_SATA) {
2081 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2082 mvi, phy_no);
2083 tmp &= ~PHYEV_SIG_FIS;
2084 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2085 phy_no, tmp);
2086 }
2087 mvs_update_phyinfo(mvi, phy_no, 0);
2088 mvs_bytes_dmaed(mvi, phy_no);
2089 			/* check whether the driver will handle this as a hot plug event */
2090 if (phy->phy_event & PHY_PLUG_OUT) {
2091 mvs_port_notify_formed(sas_phy, 0);
2092 phy->phy_event &= ~PHY_PLUG_OUT;
2093 }
2094 } else {
2095 mv_dprintk("plugin interrupt but phy%d is gone\n",
2096 phy_no + mvi->id*mvi->chip->n_phy);
2097 }
2098 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2099 mv_dprintk("port %d broadcast change.\n",
2100 phy_no + mvi->id*mvi->chip->n_phy);
2101 		/* exception for Samsung disk drive */
2102 mdelay(1000);
2103 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2104 }
2105 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2106}
2107
2108int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2109{
2110 u32 rx_prod_idx, rx_desc;
2111 bool attn = false;
2112
2113 /* the first dword in the RX ring is special: it contains
2114 * a mirror of the hardware's RX producer index, so that
2115 * we don't have to stall the CPU reading that register.
2116 * The actual RX ring is offset by one dword, due to this.
2117 */
2118 rx_prod_idx = mvi->rx_cons;
2119 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2120 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2121 return 0;
2122
2123 	/* The completion queue update may arrive late; read the register
2124	 * and try again. Note: if interrupt coalescing is enabled, the
2125	 * register must be read every time.
2126 */
2127 if (unlikely(mvi->rx_cons == rx_prod_idx))
2128 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2129
2130 if (mvi->rx_cons == rx_prod_idx)
2131 return 0;
2132
2133 while (mvi->rx_cons != rx_prod_idx) {
2134 /* increment our internal RX consumer pointer */
2135 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2136 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2137
2138 if (likely(rx_desc & RXQ_DONE))
2139 mvs_slot_complete(mvi, rx_desc, 0);
2140 if (rx_desc & RXQ_ATTN) {
2141 attn = true;
2142 } else if (rx_desc & RXQ_ERR) {
2143 if (!(rx_desc & RXQ_DONE))
2144 mvs_slot_complete(mvi, rx_desc, 0);
2145 } else if (rx_desc & RXQ_SLOT_RESET) {
2146 mvs_slot_free(mvi, rx_desc);
2147 }
2148 }
2149
2150 if (attn && self_clear)
2151 MVS_CHIP_DISP->int_full(mvi);
2152 return 0;
2153}
2154
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 00000000000..aa2270af1ba
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_SAS_H_
26#define _MV_SAS_H_
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/spinlock.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/dma-mapping.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/interrupt.h>
38#include <linux/irq.h>
39#include <linux/vmalloc.h>
40#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h>
42#include <scsi/sas_ata.h>
43#include <linux/version.h>
44#include "mv_defs.h"
45
46#define DRV_NAME "mvsas"
47#define DRV_VERSION "0.8.2"
48#define _MV_DUMP 0
49#define MVS_ID_NOT_MAPPED 0x7f
50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58 printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
71
72#define bit(n) ((u32)1 << n)
73
74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76 (__mc) != 0 ; \
77 (++__lseq), (__mc) >>= 1)
78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
92
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
171struct mvs_chip_info {
172 u32 n_host;
173 u32 n_phy;
174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
184
185struct mvs_err_info {
186 __le32 flags;
187 __le32 flags2;
188};
189
190struct mvs_cmd_hdr {
191 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
192 __le32 lens; /* cmd, max resp frame len */
193 __le32 tags; /* targ port xfer tag; tag */
194 __le32 data_len; /* data xfer len */
195 __le64 cmd_tbl; /* command table address */
196 __le64 open_frame; /* open addr frame address */
197 __le64 status_buf; /* status buffer address */
198 __le64 prd_tbl; /* PRD tbl address */
199 __le32 reserved[4];
200};
201
202struct mvs_port {
203 struct asd_sas_port sas_port;
204 u8 port_attached;
205 u8 wide_port_phymap;
206 struct list_head list;
207};
208
209struct mvs_phy {
210 struct mvs_info *mvi;
211 struct mvs_port *port;
212 struct asd_sas_phy sas_phy;
213 struct sas_identify identify;
214 struct scsi_device *sdev;
215 struct timer_list timer;
216 u64 dev_sas_addr;
217 u64 att_dev_sas_addr;
218 u32 att_dev_info;
219 u32 dev_info;
220 u32 phy_type;
221 u32 phy_status;
222 u32 irq_status;
223 u32 frame_rcvd_size;
224 u8 frame_rcvd[32];
225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
229 enum sas_linkrate minimum_linkrate;
230 enum sas_linkrate maximum_linkrate;
231};
232
233struct mvs_device {
234 struct list_head dev_entry;
235 enum sas_dev_type dev_type;
236 struct mvs_info *mvi_info;
237 struct domain_device *sas_device;
238 u32 attached_phy;
239 u32 device_id;
240 u32 runing_req;
241 u8 taskfileset;
242 u8 dev_status;
243 u16 reserved;
244};
245
246struct mvs_slot_info {
247 struct list_head entry;
248 union {
249 struct sas_task *task;
250 void *tdata;
251 };
252 u32 n_elem;
253 u32 tx;
254 u32 slot_tag;
255
256 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
257 * and PRD table
258 */
259 void *buf;
260 dma_addr_t buf_dma;
261#if _MV_DUMP
262 u32 cmd_size;
263#endif
264 void *response;
265 struct mvs_port *port;
266 struct mvs_device *device;
267 void *open_frame;
268};
269
270struct mvs_info {
271 unsigned long flags;
272
273 /* host-wide lock */
274 spinlock_t lock;
275
276 /* our device */
277 struct pci_dev *pdev;
278 struct device *dev;
279
280 /* enhanced mode registers */
281 void __iomem *regs;
282
283 /* peripheral or soc registers */
284 void __iomem *regs_ex;
285 u8 sas_addr[SAS_ADDR_SIZE];
286
287 /* SCSI/SAS glue */
288 struct sas_ha_struct *sas;
289 struct Scsi_Host *shost;
290
291 /* TX (delivery) DMA ring */
292 __le32 *tx;
293 dma_addr_t tx_dma;
294
295 /* cached next-producer idx */
296 u32 tx_prod;
297
298 /* RX (completion) DMA ring */
299 __le32 *rx;
300 dma_addr_t rx_dma;
301
302 /* RX consumer idx */
303 u32 rx_cons;
304
305 /* RX'd FIS area */
306 __le32 *rx_fis;
307 dma_addr_t rx_fis_dma;
308
309 /* DMA command header slots */
310 struct mvs_cmd_hdr *slot;
311 dma_addr_t slot_dma;
312
313 u32 chip_id;
314 const struct mvs_chip_info *chip;
315
316 int tags_num;
317 DECLARE_BITMAP(tags, MVS_SLOTS);
318 /* further per-slot information */
319 struct mvs_phy phy[MVS_MAX_PHYS];
320 struct mvs_port port[MVS_MAX_PHYS];
321 u32 irq;
322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
339#endif
340 struct mvs_slot_info slot_info[0];
341};
342
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
350struct mvs_wq {
351 struct delayed_work work_q;
352 struct mvs_info *mvi;
353 void *data;
354 int handler;
355 struct list_head entry;
356};
357
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
382int mvs_slave_configure(struct scsi_device *sdev);
383void mvs_scan_start(struct Scsi_Host *shost);
384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
385int mvs_queue_command(struct sas_task *task, const int num,
386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
397int mvs_I_T_nexus_reset(struct domain_device *dev);
398int mvs_query_task(struct sas_task *task);
399void mvs_release_task(struct mvs_info *mvi, int phy_no,
400 struct domain_device *dev);
401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
405#endif
406
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d1..5fd73d77c3a 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
11# it under the terms of the GNU General Public License version 2 11# it under the terms of the GNU General Public License version 2
12# 12#
13 13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
26# 2 - errors + warrnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
33# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library 14# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o 15libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o 16obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83b..00000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab..7a117c18114 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
118 _osd_ver_desc(or)); 118 _osd_ver_desc(or));
119 119
120 pFirst = get_attrs[a++].val_ptr; 120 pFirst = get_attrs[a++].val_ptr;
121 OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", 121 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
122 (char *)pFirst); 122 (char *)pFirst);
123 123
124 pFirst = get_attrs[a++].val_ptr; 124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", 125 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
126 (char *)pFirst); 126 (char *)pFirst);
127 127
128 pFirst = get_attrs[a++].val_ptr; 128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", 129 OSD_INFO("PRODUCT_MODEL [%s]\n",
130 (char *)pFirst); 130 (char *)pFirst);
131 131
132 pFirst = get_attrs[a++].val_ptr; 132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", 133 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
134 pFirst ? get_unaligned_be32(pFirst) : ~0U); 134 pFirst ? get_unaligned_be32(pFirst) : ~0U);
135 135
136 pFirst = get_attrs[a++].val_ptr; 136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", 137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 138 (char *)pFirst);
139 139
140 pFirst = get_attrs[a].val_ptr; 140 pFirst = get_attrs[a].val_ptr;
141 OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); 141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
142 a++; 142 a++;
143 143
144 pFirst = get_attrs[a++].val_ptr; 144 pFirst = get_attrs[a++].val_ptr;
145 OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", 145 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
147 147
148 pFirst = get_attrs[a++].val_ptr; 148 pFirst = get_attrs[a++].val_ptr;
149 OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", 149 OSD_INFO("USED_CAPACITY [0x%llx]\n",
150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
151 151
152 pFirst = get_attrs[a++].val_ptr; 152 pFirst = get_attrs[a++].val_ptr;
153 OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", 153 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 155
156 if (a >= nelem) 156 if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
158 158
159 /* FIXME: Where are the time utilities */ 159 /* FIXME: Where are the time utilities */
160 pFirst = get_attrs[a++].val_ptr; 160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", 161 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
162 ((char *)pFirst)[0], ((char *)pFirst)[1], 162 ((char *)pFirst)[0], ((char *)pFirst)[1],
163 ((char *)pFirst)[2], ((char *)pFirst)[3], 163 ((char *)pFirst)[2], ((char *)pFirst)[3],
164 ((char *)pFirst)[4], ((char *)pFirst)[5]); 164 ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
169 169
170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, 170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
171 sid_dump, sizeof(sid_dump), true); 171 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); 172 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump);
173 a++; 174 a++;
174 } 175 }
175out: 176out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
669 __be16 action, const struct osd_obj_id *obj, osd_id initial_id, 670 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
670 struct osd_obj_id_list *list, unsigned nelem) 671 struct osd_obj_id_list *list, unsigned nelem)
671{ 672{
672 struct request_queue *q = or->osd_dev->scsi_device->request_queue; 673 struct request_queue *q = osd_request_queue(or->osd_dev);
673 u64 len = nelem * sizeof(osd_id) + sizeof(*list); 674 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
674 struct bio *bio; 675 struct bio *bio;
675 676
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
778*/ 779*/
779 780
780void osd_req_write(struct osd_request *or, 781void osd_req_write(struct osd_request *or,
781 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 782 const struct osd_obj_id *obj, u64 offset,
783 struct bio *bio, u64 len)
782{ 784{
783 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); 785 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
784 WARN_ON(or->out.bio || or->out.total_bytes); 786 WARN_ON(or->out.bio || or->out.total_bytes);
785 bio->bi_rw |= (1 << BIO_RW); 787 WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
786 or->out.bio = bio; 788 or->out.bio = bio;
787 or->out.total_bytes = bio->bi_size; 789 or->out.total_bytes = len;
788} 790}
789EXPORT_SYMBOL(osd_req_write); 791EXPORT_SYMBOL(osd_req_write);
790 792
793int osd_req_write_kern(struct osd_request *or,
794 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
795{
796 struct request_queue *req_q = osd_request_queue(or->osd_dev);
797 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
798
799 if (IS_ERR(bio))
800 return PTR_ERR(bio);
801
802 bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
803 osd_req_write(or, obj, offset, bio, len);
804 return 0;
805}
806EXPORT_SYMBOL(osd_req_write_kern);
807
791/*TODO: void osd_req_append(struct osd_request *, 808/*TODO: void osd_req_append(struct osd_request *,
792 const struct osd_obj_id *, struct bio *data_out); */ 809 const struct osd_obj_id *, struct bio *data_out); */
793/*TODO: void osd_req_create_write(struct osd_request *, 810/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
813EXPORT_SYMBOL(osd_req_flush_object); 830EXPORT_SYMBOL(osd_req_flush_object);
814 831
815void osd_req_read(struct osd_request *or, 832void osd_req_read(struct osd_request *or,
816 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 833 const struct osd_obj_id *obj, u64 offset,
834 struct bio *bio, u64 len)
817{ 835{
818 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); 836 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
819 WARN_ON(or->in.bio || or->in.total_bytes); 837 WARN_ON(or->in.bio || or->in.total_bytes);
820 bio->bi_rw &= ~(1 << BIO_RW); 838 WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
821 or->in.bio = bio; 839 or->in.bio = bio;
822 or->in.total_bytes = bio->bi_size; 840 or->in.total_bytes = len;
823} 841}
824EXPORT_SYMBOL(osd_req_read); 842EXPORT_SYMBOL(osd_req_read);
825 843
844int osd_req_read_kern(struct osd_request *or,
845 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
846{
847 struct request_queue *req_q = osd_request_queue(or->osd_dev);
848 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
849
850 if (IS_ERR(bio))
851 return PTR_ERR(bio);
852
853 osd_req_read(or, obj, offset, bio, len);
854 return 0;
855}
856EXPORT_SYMBOL(osd_req_read_kern);
857
826void osd_req_get_attributes(struct osd_request *or, 858void osd_req_get_attributes(struct osd_request *or,
827 const struct osd_obj_id *obj) 859 const struct osd_obj_id *obj)
828{ 860{
@@ -889,26 +921,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 921}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 922EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 923
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 924static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 925 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 926 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +936,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 936 else
925 pad_buff = io->pad_buff; 937 pad_buff = io->pad_buff;
926 938
927 ret = _append_map_kern(io->req, pad_buff, padding, 939 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 940 or->alloc_flags);
929 if (ret) 941 if (ret)
930 return ret; 942 return ret;
931 io->total_bytes += padding; 943 io->total_bytes += padding;
932 } 944 }
933 945
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 946 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 947 or->alloc_flags);
936 if (ret) 948 if (ret)
937 return ret; 949 return ret;
@@ -1233,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1233} 1245}
1234 1246
1235static int _osd_req_finalize_data_integrity(struct osd_request *or, 1247static int _osd_req_finalize_data_integrity(struct osd_request *or,
1236 bool has_in, bool has_out, const u8 *cap_key) 1248 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
1237{ 1249{
1238 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1250 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1239 int ret; 1251 int ret;
@@ -1248,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1248 }; 1260 };
1249 unsigned pad; 1261 unsigned pad;
1250 1262
1251 or->out_data_integ.data_bytes = cpu_to_be64( 1263 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1252 or->out.bio ? or->out.bio->bi_size : 0);
1253 or->out_data_integ.set_attributes_bytes = cpu_to_be64( 1264 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1254 or->set_attr.total_bytes); 1265 or->set_attr.total_bytes);
1255 or->out_data_integ.get_attributes_bytes = cpu_to_be64( 1266 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1293,6 +1304,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1304/*
1294 * osd_finalize_request and helpers 1305 * osd_finalize_request and helpers
1295 */ 1306 */
1307static struct request *_make_request(struct request_queue *q, bool has_write,
1308 struct _osd_io_info *oii, gfp_t flags)
1309{
1310 if (oii->bio)
1311 return blk_make_request(q, oii->bio, flags);
1312 else {
1313 struct request *req;
1314
1315 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1316 if (unlikely(!req))
1317 return ERR_PTR(-ENOMEM);
1318
1319 return req;
1320 }
1321}
1296 1322
1297static int _init_blk_request(struct osd_request *or, 1323static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1324 bool has_in, bool has_out)
@@ -1301,14 +1327,18 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1327 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1328 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1329 struct request *req;
1304 int ret = -ENOMEM; 1330 int ret;
1305 1331
1306 req = blk_get_request(q, has_out, flags); 1332 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1333 if (IS_ERR(req)) {
1334 ret = PTR_ERR(req);
1308 goto out; 1335 goto out;
1336 }
1309 1337
1310 or->request = req; 1338 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1339 req->cmd_type = REQ_TYPE_BLOCK_PC;
1340 req->cmd_flags |= REQ_QUIET;
1341
1312 req->timeout = or->timeout; 1342 req->timeout = or->timeout;
1313 req->retries = or->retries; 1343 req->retries = or->retries;
1314 req->sense = or->sense; 1344 req->sense = or->sense;
@@ -1318,9 +1348,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1348 or->out.req = req;
1319 if (has_in) { 1349 if (has_in) {
1320 /* allocate bidi request */ 1350 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1351 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1352 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1353 OSD_DEBUG("blk_get_request for bidi failed\n");
1354 ret = PTR_ERR(req);
1324 goto out; 1355 goto out;
1325 } 1356 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1357 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1341,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
1341{ 1372{
1342 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1373 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1343 bool has_in, has_out; 1374 bool has_in, has_out;
1375 u64 out_data_bytes = or->out.total_bytes;
1344 int ret; 1376 int ret;
1345 1377
1346 if (options & OSD_REQ_FUA) 1378 if (options & OSD_REQ_FUA)
@@ -1364,26 +1396,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1396 return ret;
1365 } 1397 }
1366 1398
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1399 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1400 or->in.pad_buff = sg_in_pad_buffer;
1389 1401
@@ -1410,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
1410 } 1422 }
1411 } 1423 }
1412 1424
1413 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); 1425 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1426 out_data_bytes, cap_key);
1414 if (ret) 1427 if (ret)
1415 return ret; 1428 return ret;
1416 1429
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba8..0bdef339090 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h> 55#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
175 176
176struct osd_dev *osduld_path_lookup(const char *name) 177struct osd_dev *osduld_path_lookup(const char *name)
177{ 178{
178 struct path path; 179 struct osd_uld_device *oud;
179 struct inode *inode; 180 struct osd_dev *od;
180 struct cdev *cdev; 181 struct file *file;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error; 182 int error;
183 183
184 if (!name || !*name) { 184 if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 186 return ERR_PTR(-EINVAL);
187 } 187 }
188 188
189 error = kern_path(name, LOOKUP_FOLLOW, &path); 189 od = kzalloc(sizeof(*od), GFP_KERNEL);
190 if (error) { 190 if (!od)
191 OSD_ERR("path_lookup of %s failed=>%d\n", name, error); 191 return ERR_PTR(-ENOMEM);
192 return ERR_PTR(error);
193 }
194 192
195 inode = path.dentry->d_inode; 193 file = filp_open(name, O_RDWR, 0);
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */ 194 if (IS_ERR(file)) {
197 if (!S_ISCHR(inode->i_mode)) { 195 error = PTR_ERR(file);
198 OSD_DEBUG("!S_ISCHR()\n"); 196 goto free_od;
199 goto out;
200 } 197 }
201 198
202 cdev = inode->i_cdev; 199 if (file->f_op != &osd_fops){
203 if (!cdev) { 200 error = -EINVAL;
204 OSD_ERR("Before mounting an OSD Based filesystem\n"); 201 goto close_file;
205 OSD_ERR(" user-mode must open+close the %s device\n", name);
206 OSD_ERR(" Example: bash: echo < %s\n", name);
207 goto out;
208 } 202 }
209 203
210 /* The Magic wand. Is it our char-dev */ 204 oud = file->private_data;
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", name);
214 goto out;
215 }
216 205
217 oud = container_of(cdev, struct osd_uld_device, cdev); 206 *od = oud->od;
207 od->file = file;
218 208
219 __uld_get(oud); 209 return od;
220 error = 0;
221 210
222out: 211close_file:
223 path_put(&path); 212 fput(file);
224 return error ? ERR_PTR(error) : &oud->od; 213free_od:
214 kfree(od);
215 return ERR_PTR(error);
225} 216}
226EXPORT_SYMBOL(osduld_path_lookup); 217EXPORT_SYMBOL(osduld_path_lookup);
227 218
228void osduld_put_device(struct osd_dev *od) 219void osduld_put_device(struct osd_dev *od)
229{ 220{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233 221
234 __uld_put(oud); 222 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data;
224
225 BUG_ON(od->scsi_device != oud->od.scsi_device);
226
227 fput(od->file);
228 kfree(od);
235 } 229 }
236} 230}
237EXPORT_SYMBOL(osduld_put_device); 231EXPORT_SYMBOL(osduld_put_device);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5ed..8371d917a9a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.26" 20#define QLA1280_VERSION "3.27"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup.
25 - Improve error recovery.
23 Rev 3.26, January 16, 2006 Jes Sorensen 26 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support 27 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig 28 Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *); 438 uint8_t, uint16_t *);
436static int qla1280_bus_reset(struct scsi_qla_host *, int); 439static int qla1280_bus_reset(struct scsi_qla_host *, int);
437static int qla1280_device_reset(struct scsi_qla_host *, int, int); 440static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 441static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440static int qla1280_abort_isp(struct scsi_qla_host *); 442static int qla1280_abort_isp(struct scsi_qla_host *);
441#ifdef QLA_64BIT_PTR 443#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
698} 700}
699 701
700/************************************************************************** 702/**************************************************************************
701 * qla1200_queuecommand 703 * qla1280_queuecommand
702 * Queue a command to the controller. 704 * Queue a command to the controller.
703 * 705 *
704 * Note: 706 * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
713{ 715{
714 struct Scsi_Host *host = cmd->device->host; 716 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 717 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp; 718 struct srb *sp = (struct srb *)CMD_SP(cmd);
717 int status; 719 int status;
718 720
719 cmd->scsi_done = fn; 721 cmd->scsi_done = fn;
720 sp->cmd = cmd; 722 sp->cmd = cmd;
721 sp->flags = 0; 723 sp->flags = 0;
724 sp->wait = NULL;
725 CMD_HANDLE(cmd) = (unsigned char *)NULL;
722 726
723 qla1280_print_scsi_cmd(5, cmd); 727 qla1280_print_scsi_cmd(5, cmd);
724 728
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
738 742
739enum action { 743enum action {
740 ABORT_COMMAND, 744 ABORT_COMMAND,
741 ABORT_DEVICE,
742 DEVICE_RESET, 745 DEVICE_RESET,
743 BUS_RESET, 746 BUS_RESET,
744 ADAPTER_RESET, 747 ADAPTER_RESET,
745 FAIL
746}; 748};
747 749
748/* timer action for error action processor */
749static void qla1280_error_wait_timeout(unsigned long __data)
750{
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
753
754 complete(sp->wait);
755}
756 750
757static void qla1280_mailbox_timeout(unsigned long __data) 751static void qla1280_mailbox_timeout(unsigned long __data)
758{ 752{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
767 complete(ha->mailbox_wait); 761 complete(ha->mailbox_wait);
768} 762}
769 763
764static int
765_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
766 struct completion *wait)
767{
768 int status = FAILED;
769 struct scsi_cmnd *cmd = sp->cmd;
770
771 spin_unlock_irq(ha->host->host_lock);
772 wait_for_completion_timeout(wait, 4*HZ);
773 spin_lock_irq(ha->host->host_lock);
774 sp->wait = NULL;
775 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
776 status = SUCCESS;
777 (*cmd->scsi_done)(cmd);
778 }
779 return status;
780}
781
782static int
783qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
784{
785 DECLARE_COMPLETION_ONSTACK(wait);
786
787 sp->wait = &wait;
788 return _qla1280_wait_for_single_command(ha, sp, &wait);
789}
790
791static int
792qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
793{
794 int cnt;
795 int status;
796 struct srb *sp;
797 struct scsi_cmnd *cmd;
798
799 status = SUCCESS;
800
801 /*
802 * Wait for all commands with the designated bus/target
803 * to be completed by the firmware
804 */
805 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
806 sp = ha->outstanding_cmds[cnt];
807 if (sp) {
808 cmd = sp->cmd;
809
810 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
811 continue;
812 if (target >= 0 && SCSI_TCN_32(cmd) != target)
813 continue;
814
815 status = qla1280_wait_for_single_command(ha, sp);
816 if (status == FAILED)
817 break;
818 }
819 }
820 return status;
821}
822
770/************************************************************************** 823/**************************************************************************
771 * qla1200_error_action 824 * qla1280_error_action
772 * The function will attempt to perform a specified error action and 825 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out). 826 * wait for the results (or time out).
774 * 827 *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
780 * Returns: 833 * Returns:
781 * SUCCESS or FAILED 834 * SUCCESS or FAILED
782 * 835 *
783 * Note:
784 * Resetting the bus always succeeds - is has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/ 836 **************************************************************************/
789static int 837static int
790qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 838qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha; 840 struct scsi_qla_host *ha;
793 int bus, target, lun; 841 int bus, target, lun;
794 struct srb *sp; 842 struct srb *sp;
795 uint16_t data; 843 int i, found;
796 unsigned char *handle; 844 int result=FAILED;
797 int result, i; 845 int wait_for_bus=-1;
846 int wait_for_target = -1;
798 DECLARE_COMPLETION_ONSTACK(wait); 847 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer; 848
849 ENTER("qla1280_error_action");
800 850
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 851 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
852 sp = (struct srb *)CMD_SP(cmd);
853 bus = SCSI_BUS_32(cmd);
854 target = SCSI_TCN_32(cmd);
855 lun = SCSI_LUN_32(cmd);
802 856
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 857 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus)); 858 RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
807 RD_REG_WORD(&ha->iobase->host_cmd), 861 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 862 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
809 863
810 ENTER("qla1280_error_action");
811 if (qla1280_verbose) 864 if (qla1280_verbose)
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 865 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n", 866 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action); 867 ha->host_no, cmd, CMD_HANDLE(cmd), action);
815 868
816 if (cmd == NULL) {
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
820 return FAILED;
821 }
822
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
826
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
829 /*
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
833 */
834 if (data & RISC_INT)
835 qla1280_isr(ha, &ha->done_q);
836
837 /* 869 /*
838 * Determine the suggested action that the mid-level driver wants 870 * Check to see if we have the command in the outstanding_cmds[]
839 * us to perform. 871 * array. If not then it must have completed before this error
872 * action was initiated. If the error_action isn't ABORT_COMMAND
873 * then the driver must proceed with the requested action.
840 */ 874 */
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { 875 found = -1;
842 if(action == ABORT_COMMAND) { 876 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
843 /* we never got this command */ 877 if (sp == ha->outstanding_cmds[i]) {
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); 878 found = i;
845 return SUCCESS; /* no action - we don't have command */ 879 sp->wait = &wait; /* we'll wait for it to complete */
880 break;
846 } 881 }
847 } else {
848 sp->wait = &wait;
849 } 882 }
850 883
851 bus = SCSI_BUS_32(cmd); 884 if (found < 0) { /* driver doesn't have command */
852 target = SCSI_TCN_32(cmd); 885 result = SUCCESS;
853 lun = SCSI_LUN_32(cmd); 886 if (qla1280_verbose) {
887 printk(KERN_INFO
888 "scsi(%ld:%d:%d:%d): specified command has "
889 "already completed.\n", ha->host_no, bus,
890 target, lun);
891 }
892 }
854 893
855 /* Overloading result. Here it means the success or fail of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or fail of the action */
858 result = FAILED;
859 switch (action) { 894 switch (action) {
860 case FAIL:
861 break;
862 895
863 case ABORT_COMMAND: 896 case ABORT_COMMAND:
864 if ((sp->flags & SRB_ABORT_PENDING)) { 897 dprintk(1, "qla1280: RISC aborting command\n");
865 printk(KERN_WARNING 898 /*
866 "scsi(): Command has a pending abort " 899 * The abort might fail due to race when the host_lock
867 "message - ABORT_PENDING.\n"); 900 * is released to issue the abort. As such, we
868 /* This should technically be impossible since we 901 * don't bother to check the return status.
869 * now wait for abort completion */ 902 */
870 break; 903 if (found >= 0)
871 } 904 qla1280_abort_command(ha, sp, found);
872
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
877 result = SUCCESS;
878 else {
879 /*
880 * Since we don't know what might
881 * have happend to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
886 */
887 printk(KERN_WARNING
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
891 }
892 }
893 }
894 break;
895
896 case ABORT_DEVICE:
897 if (qla1280_verbose)
898 printk(KERN_INFO
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
902 result = SUCCESS;
903 break; 905 break;
904 906
905 case DEVICE_RESET: 907 case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
907 printk(KERN_INFO 909 printk(KERN_INFO
908 "scsi(%ld:%d:%d:%d): Queueing device reset " 910 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun); 911 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0) 912 if (qla1280_device_reset(ha, bus, target) == 0) {
911 result = SUCCESS; 913 /* issued device reset, set wait conditions */
914 wait_for_bus = bus;
915 wait_for_target = target;
916 }
912 break; 917 break;
913 918
914 case BUS_RESET: 919 case BUS_RESET:
915 if (qla1280_verbose) 920 if (qla1280_verbose)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 921 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus); 922 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0) 923 if (qla1280_bus_reset(ha, bus) == 0) {
919 result = SUCCESS; 924 /* issued bus reset, set wait conditions */
925 wait_for_bus = bus;
926 }
920 break; 927 break;
921 928
922 case ADAPTER_RESET: 929 case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
929 "continue automatically\n", ha->host_no); 936 "continue automatically\n", ha->host_no);
930 } 937 }
931 ha->flags.reset_active = 1; 938 ha->flags.reset_active = 1;
932 /* 939
933 * We restarted all of the commands automatically, so the 940 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
934 * mid-level code can expect completions momentitarily. 941 result = FAILED;
935 */ 942 }
936 if (qla1280_abort_isp(ha) == 0)
937 result = SUCCESS;
938 943
939 ha->flags.reset_active = 0; 944 ha->flags.reset_active = 0;
940 } 945 }
941 946
942 if (!list_empty(&ha->done_q)) 947 /*
943 qla1280_done(ha); 948 * At this point, the host_lock has been released and retaken
944 949 * by the issuance of the mailbox command.
945 /* If we didn't manage to issue the action, or we have no 950 * Wait for the command passed in by the mid-layer if it
946 * command to wait for, exit here */ 951 * was found by the driver. It might have been returned
947 if (result == FAILED || handle == NULL || 952 * between eh recovery steps, hence the check of the "found"
948 handle == (unsigned char *)INVALID_HANDLE) { 953 * variable.
949 /* 954 */
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
953 */
954 sp->wait = NULL;
955 goto leave;
956 }
957 955
958 /* set up a timer just in case we're really jammed */ 956 if (found >= 0)
959 init_timer(&timer); 957 result = _qla1280_wait_for_single_command(ha, sp, &wait);
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
963 add_timer(&timer);
964 958
965 /* wait for the action to complete (or the timer to expire) */ 959 if (action == ABORT_COMMAND && result != SUCCESS) {
966 spin_unlock_irq(ha->host->host_lock); 960 printk(KERN_WARNING
967 wait_for_completion(&wait); 961 "scsi(%li:%i:%i:%i): "
968 del_timer_sync(&timer); 962 "Unable to abort command!\n",
969 spin_lock_irq(ha->host->host_lock); 963 ha->host_no, bus, target, lun);
970 sp->wait = NULL; 964 }
971 965
972 /* the only action we might get a fail for is abort */ 966 /*
973 if (action == ABORT_COMMAND) { 967 * If the command passed in by the mid-layer has been
974 if(sp->flags & SRB_ABORTED) 968 * returned by the board, then wait for any additional
975 result = SUCCESS; 969 * commands which are supposed to complete based upon
976 else 970 * the error action.
977 result = FAILED; 971 *
972 * All commands are unconditionally returned during a
973 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
974 * to wait for them.
975 */
976 if (result == SUCCESS && wait_for_bus >= 0) {
977 result = qla1280_wait_for_pending_commands(ha,
978 wait_for_bus, wait_for_target);
978 } 979 }
979 980
980 leave:
981 dprintk(1, "RESET returning %d\n", result); 981 dprintk(1, "RESET returning %d\n", result);
982 982
983 LEAVE("qla1280_error_action"); 983 LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
1280 switch ((CMD_RESULT(cmd) >> 16)) { 1280 switch ((CMD_RESULT(cmd) >> 16)) {
1281 case DID_RESET: 1281 case DID_RESET:
1282 /* Issue marker command. */ 1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1283 if (!ha->flags.abort_isp_active)
1284 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1284 break; 1285 break;
1285 case DID_ABORT: 1286 case DID_ABORT:
1286 sp->flags &= ~SRB_ABORT_PENDING; 1287 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED; 1288 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1290 break; 1289 break;
1291 default: 1290 default:
1292 break; 1291 break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
1296 scsi_dma_unmap(cmd); 1295 scsi_dma_unmap(cmd);
1297 1296
1298 /* Call the mid-level driver interrupt handler */ 1297 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1300 ha->actthreads--; 1298 ha->actthreads--;
1301 1299
1302 (*(cmd)->scsi_done)(cmd); 1300 if (sp->wait == NULL)
1303 1301 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL) 1302 else
1305 complete(sp->wait); 1303 complete(sp->wait);
1306 } 1304 }
1307 LEAVE("qla1280_done"); 1305 LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
2417qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2415qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2418{ 2416{
2419 struct device_reg __iomem *reg = ha->iobase; 2417 struct device_reg __iomem *reg = ha->iobase;
2420#if 0
2421 LIST_HEAD(done_q);
2422#endif
2423 int status = 0; 2418 int status = 0;
2424 int cnt; 2419 int cnt;
2425 uint16_t *optr, *iptr; 2420 uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2493 mr = MAILBOX_REGISTER_COUNT; 2488 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2489 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2495 2490
2496#if 0
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2499#endif
2500
2501 if (ha->flags.reset_marker) 2491 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha); 2492 qla1280_rst_aen(ha);
2503 2493
2504#if 0
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2507#endif
2508
2509 if (status) 2494 if (status)
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2495 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]); 2496 "0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2641} 2626}
2642 2627
2643/* 2628/*
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2646 *
2647 * Input:
2648 * ha = adapter block pointer.
2649 * bus = SCSI BUS.
2650 * target = SCSI ID.
2651 * lun = SCSI LUN.
2652 *
2653 * Returns:
2654 * 0 = success
2655 */
2656static int
2657qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2658{
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2660 int status;
2661
2662 ENTER("qla1280_abort_device");
2663
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2667
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2670
2671 if (status)
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2673
2674 LEAVE("qla1280_abort_device");
2675 return status;
2676}
2677
2678/*
2679 * qla1280_abort_command 2629 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB. 2630 * Abort command aborts a specified IOCB.
2681 * 2631 *
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2833 2783
2834 /* If room for request in request ring. */ 2784 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) { 2785 if ((req_cnt + 2) >= ha->req_q_cnt) {
2836 status = 1; 2786 status = SCSI_MLQUEUE_HOST_BUSY;
2837 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2787 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2788 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2839 req_cnt); 2789 req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 ha->outstanding_cmds[cnt] != NULL; cnt++); 2795 ha->outstanding_cmds[cnt] != NULL; cnt++);
2846 2796
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2797 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2848 status = 1; 2798 status = SCSI_MLQUEUE_HOST_BUSY;
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2799 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2800 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2851 goto out; 2801 goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3108 ha->req_q_cnt, seg_cnt); 3058 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */ 3059 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) { 3060 if ((req_cnt + 2) >= ha->req_q_cnt) {
3111 status = 1; 3061 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3062 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3063 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt); 3064 ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3070 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3121 3071
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3072 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3123 status = 1; 3073 status = SCSI_MLQUEUE_HOST_BUSY;
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3074 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3075 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3126 goto out; 3076 goto out;
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3487 3437
3488 /* Save ISP completion status */ 3438 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0; 3439 CMD_RESULT(sp->cmd) = 0;
3440 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3490 3441
3491 /* Place block on done queue */ 3442 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q); 3443 list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3495 * If we get here we have a real problem! 3446 * If we get here we have a real problem!
3496 */ 3447 */
3497 printk(KERN_WARNING 3448 printk(KERN_WARNING
3498 "qla1280: ISP invalid handle"); 3449 "qla1280: ISP invalid handle\n");
3499 } 3450 }
3500 } 3451 }
3501 break; 3452 break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3753 } 3704 }
3754 } 3705 }
3755 3706
3707 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3708
3756 /* Place command on done queue. */ 3709 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q); 3710 list_add_tail(&sp->list, done_q);
3758 out: 3711 out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3761 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } 3762 }
3810 3763
3764 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3765
3811 /* Place command on done queue. */ 3766 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q); 3767 list_add_tail(&sp->list, done_q);
3813 } 3768 }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
3858 struct scsi_cmnd *cmd; 3813 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt]; 3814 sp = ha->outstanding_cmds[cnt];
3860 if (sp) { 3815 if (sp) {
3861
3862 cmd = sp->cmd; 3816 cmd = sp->cmd;
3863 CMD_RESULT(cmd) = DID_RESET << 16; 3817 CMD_RESULT(cmd) = DID_RESET << 16;
3864 3818 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3865 sp->cmd = NULL;
3866 ha->outstanding_cmds[cnt] = NULL; 3819 ha->outstanding_cmds[cnt] = NULL;
3867 3820 list_add_tail(&sp->list, &ha->done_q);
3868 (*cmd->scsi_done)(cmd);
3869
3870 sp->flags = 0;
3871 } 3821 }
3872 } 3822 }
3873 3823
3824 qla1280_done(ha);
3825
3874 status = qla1280_load_firmware(ha); 3826 status = qla1280_load_firmware(ha);
3875 if (status) 3827 if (status)
3876 goto out; 3828 goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3955 3907
3956 if (scsi_control == SCSI_PHASE_INVALID) { 3908 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1; 3909 ha->bus_settings[bus].scsi_bus_dead = 1;
3958#if 0
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3962
3963 (*(cp)->scsi_done)(cp);
3964#endif
3965 return 1; /* bus is dead */ 3910 return 1; /* bus is dead */
3966 } else { 3911 } else {
3967 ha->bus_settings[bus].scsi_bus_dead = 0; 3912 ha->bus_settings[bus].scsi_bus_dead = 0;
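The core of the qla1280 error-handler rework above is a small completion handshake: the eh thread plants a completion pointer in the srb while still holding the host lock, drops the lock only for a 4-second wait, and afterwards judges success solely by whether the interrupt path stamped the command's handle with the new COMPLETED_HANDLE sentinel. A condensed sketch of that handshake; my_srb, my_cmd_returned() and my_wait_for_single_command() stand in for the driver's srb, qla1280_done() and _qla1280_wait_for_single_command(), and the 0 / -ETIMEDOUT return replaces the mid-layer SUCCESS/FAILED codes:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

#define MAX_OUTSTANDING_COMMANDS 512
#define COMPLETED_HANDLE ((unsigned char *)(MAX_OUTSTANDING_COMMANDS + 2))  /* as in qla1280.h */

struct my_srb {
        struct completion *wait;        /* non-NULL only while an eh thread waits */
        unsigned char *handle;          /* set to COMPLETED_HANDLE by the ISR */
};

/* Interrupt/completion path: mark the command as returned, wake any waiter. */
static void my_cmd_returned(struct my_srb *sp)
{
        sp->handle = COMPLETED_HANDLE;
        if (sp->wait)
                complete(sp->wait);
}

/*
 * Error-handler path: the caller holds host_lock; it is released only
 * around the sleep, and success is decided purely by whether the
 * firmware returned the command within the 4 second window.
 */
static int my_wait_for_single_command(spinlock_t *host_lock, struct my_srb *sp)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = -ETIMEDOUT;

        sp->wait = &wait;
        spin_unlock_irq(host_lock);
        wait_for_completion_timeout(&wait, 4 * HZ);
        spin_lock_irq(host_lock);
        sp->wait = NULL;

        if (sp->handle == COMPLETED_HANDLE)
                ret = 0;        /* the driver also invokes ->scsi_done() here */
        return ret;
}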
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4..834884b9eed 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
88 88
89/* Maximum outstanding commands in ISP queues */ 89/* Maximum outstanding commands in ISP queues */
90#define MAX_OUTSTANDING_COMMANDS 512 90#define MAX_OUTSTANDING_COMMANDS 512
91#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) 91#define COMPLETED_HANDLE ((unsigned char *) \
92 (MAX_OUTSTANDING_COMMANDS + 2))
92 93
93/* ISP request and response entry counts (37-65535) */ 94/* ISP request and response entry counts (37-65535) */
94#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ 95#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
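COMPLETED_HANDLE above is the sentinel the qla1280.c interrupt paths now store in CMD_HANDLE() once the firmware has returned a command. It is also what lets the error handler wait for every outstanding command on the affected bus or target after a reset, not only the command the mid-layer passed in. A condensed sketch of that walk over outstanding_cmds[]; the SCSI_BUS_32()/SCSI_TCN_32() accessors are reduced to plain fields, and the per-command wait (in the spirit of _qla1280_wait_for_single_command()) is only declared, not defined:

#define MAX_OUTSTANDING_COMMANDS 512

struct my_srb {
        int bus;        /* stand-ins for SCSI_BUS_32()/SCSI_TCN_32() */
        int target;
};

struct my_host {
        struct my_srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

/* Waits for one command to be returned by the firmware; sketched earlier. */
int my_wait_for_single_command(struct my_host *ha, struct my_srb *sp);

/* Wait for every in-flight command on the given bus/target; -1 means "any". */
static int my_wait_for_pending_commands(struct my_host *ha, int bus, int target)
{
        int cnt, ret = 0;

        for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
                struct my_srb *sp = ha->outstanding_cmds[cnt];

                if (!sp)
                        continue;
                if (bus >= 0 && sp->bus != bus)
                        continue;
                if (target >= 0 && sp->target != target)
                        continue;

                ret = my_wait_for_single_command(ha, sp);
                if (ret)
                        break;
        }
        return ret;
}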
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a0657..0f879620150 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
97 return 0; 97 return 0;
98 98
99 if (IS_NOCACHE_VPD_TYPE(ha)) 99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, 100 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
101 ha->nvram_size); 101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram, 102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size); 103 ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
692 .read = qla2x00_sysfs_read_edc_status, 692 .read = qla2x00_sysfs_read_edc_status,
693}; 693};
694 694
695static ssize_t
696qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
697 struct bin_attribute *bin_attr,
698 char *buf, loff_t off, size_t count)
699{
700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
701 struct device, kobj)));
702 struct qla_hw_data *ha = vha->hw;
703 int rval;
704 uint16_t actual_size;
705
706 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
707 return 0;
708
709 if (ha->xgmac_data)
710 goto do_read;
711
712 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
713 &ha->xgmac_data_dma, GFP_KERNEL);
714 if (!ha->xgmac_data) {
715 qla_printk(KERN_WARNING, ha,
716 "Unable to allocate memory for XGMAC read-data.\n");
717 return 0;
718 }
719
720do_read:
721 actual_size = 0;
722 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
723
724 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
725 XGMAC_DATA_SIZE, &actual_size);
726 if (rval != QLA_SUCCESS) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to read XGMAC data (%x).\n", rval);
729 count = 0;
730 }
731
732 count = actual_size > count ? count: actual_size;
733 memcpy(buf, ha->xgmac_data, count);
734
735 return count;
736}
737
738static struct bin_attribute sysfs_xgmac_stats_attr = {
739 .attr = {
740 .name = "xgmac_stats",
741 .mode = S_IRUSR,
742 },
743 .size = 0,
744 .read = qla2x00_sysfs_read_xgmac_stats,
745};
746
747static ssize_t
748qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
749 struct bin_attribute *bin_attr,
750 char *buf, loff_t off, size_t count)
751{
752 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
753 struct device, kobj)));
754 struct qla_hw_data *ha = vha->hw;
755 int rval;
756 uint16_t actual_size;
757
758 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
759 return 0;
760
761 if (ha->dcbx_tlv)
762 goto do_read;
763
764 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
765 &ha->dcbx_tlv_dma, GFP_KERNEL);
766 if (!ha->dcbx_tlv) {
767 qla_printk(KERN_WARNING, ha,
768 "Unable to allocate memory for DCBX TLV read-data.\n");
769 return 0;
770 }
771
772do_read:
773 actual_size = 0;
774 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
775
776 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
777 DCBX_TLV_DATA_SIZE);
778 if (rval != QLA_SUCCESS) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to read DCBX TLV data (%x).\n", rval);
781 count = 0;
782 }
783
784 memcpy(buf, ha->dcbx_tlv, count);
785
786 return count;
787}
788
789static struct bin_attribute sysfs_dcbx_tlv_attr = {
790 .attr = {
791 .name = "dcbx_tlv",
792 .mode = S_IRUSR,
793 },
794 .size = 0,
795 .read = qla2x00_sysfs_read_dcbx_tlv,
796};
797
695static struct sysfs_entry { 798static struct sysfs_entry {
696 char *name; 799 char *name;
697 struct bin_attribute *attr; 800 struct bin_attribute *attr;
@@ -706,6 +809,8 @@ static struct sysfs_entry {
706 { "reset", &sysfs_reset_attr, }, 809 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 }, 810 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 }, 811 { "edc_status", &sysfs_edc_status_attr, 2 },
812 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
813 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
709 { NULL }, 814 { NULL },
710}; 815};
711 816
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
721 continue; 826 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 827 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue; 828 continue;
829 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
830 continue;
724 831
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 832 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr); 833 iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
743 continue; 850 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 851 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue; 852 continue;
853 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
854 continue;
746 855
747 sysfs_remove_bin_file(&host->shost_gendev.kobj, 856 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr); 857 iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1197 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089} 1198}
1090 1199
1200static ssize_t
1201qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1202 char *buf)
1203{
1204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205
1206 if (!IS_QLA81XX(vha->hw))
1207 return snprintf(buf, PAGE_SIZE, "\n");
1208
1209 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1210}
1211
1212static ssize_t
1213qla2x00_vn_port_mac_address_show(struct device *dev,
1214 struct device_attribute *attr, char *buf)
1215{
1216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1217
1218 if (!IS_QLA81XX(vha->hw))
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1222 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1223 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1224 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1225}
1226
1227static ssize_t
1228qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1229 char *buf)
1230{
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232
1233 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1234}
1235
1236static ssize_t
1237qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf)
1239{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval;
1242 uint16_t state[5];
1243
1244 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state));
1247
1248 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1249 state[1], state[2], state[3], state[4]);
1250}
1251
1091static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1252static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1253static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1254static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1277static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1278static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL); 1279 NULL);
1280static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1281static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1282 qla2x00_vn_port_mac_address_show, NULL);
1283static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1284static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1119 1285
1120struct device_attribute *qla2x00_host_attrs[] = { 1286struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version, 1287 &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
1138 &dev_attr_mpi_version, 1304 &dev_attr_mpi_version,
1139 &dev_attr_phy_version, 1305 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size, 1306 &dev_attr_flash_block_size,
1307 &dev_attr_vlan_id,
1308 &dev_attr_vn_port_mac_address,
1309 &dev_attr_fabric_param,
1310 &dev_attr_fw_state,
1141 NULL, 1311 NULL,
1142}; 1312};
1143 1313
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1313 * At this point all fcport's software-states are cleared. Perform any 1483 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs). 1484 * final cleanup of firmware resources (PCBs and XCBs).
1315 */ 1485 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID) 1486 if (fcport->loop_id != FC_NO_LOOP_ID &&
1487 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain, 1489 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1490 fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
1437qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1608qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438{ 1609{
1439 int ret = 0; 1610 int ret = 0;
1440 int cnt = 0; 1611 uint8_t qos = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 1612 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL; 1613 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw; 1614 struct qla_hw_data *ha = base_vha->hw;
1615 uint16_t options = 0;
1616 int cnt;
1617 struct req_que *req = ha->req_q_map[0];
1445 1618
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1619 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) { 1620 if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1497 1670
1498 qla24xx_vport_disable(fc_vport, disable); 1671 qla24xx_vport_disable(fc_vport, disable);
1499 1672
1500 /* Create a queue pair for the vport */ 1673 if (ql2xmultique_tag) {
1501 if (ha->mqenable) { 1674 req = ha->req_q_map[1];
1502 if (ha->npiv_info) { 1675 goto vport_queue;
1503 for (; cnt < ha->nvram_npiv_size; cnt++) { 1676 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1504 if (ha->npiv_info[cnt].port_name == 1677 goto vport_queue;
1505 vha->port_name && 1678 /* Create a request queue in QoS mode for the vport */
1506 ha->npiv_info[cnt].node_name == 1679 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1507 vha->node_name) { 1680 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1508 qos = ha->npiv_info[cnt].q_qos; 1681 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1509 break; 1682 8) == 0) {
1510 } 1683 qos = ha->npiv_info[cnt].q_qos;
1511 } 1684 break;
1685 }
1686 }
1687 if (qos) {
1688 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1689 qos);
1690 if (!ret)
1691 qla_printk(KERN_WARNING, ha,
1692 "Can't create request queue for vp_idx:%d\n",
1693 vha->vp_idx);
1694 else {
1695 DEBUG2(qla_printk(KERN_INFO, ha,
1696 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1697 ret, qos, vha->vp_idx));
1698 req = ha->req_q_map[ret];
1512 } 1699 }
1513 qla25xx_create_queues(vha, qos);
1514 } 1700 }
1515 1701
1702vport_queue:
1703 vha->req = req;
1516 return 0; 1704 return 0;
1705
1517vport_create_failed_2: 1706vport_create_failed_2:
1518 qla24xx_disable_vp(vha); 1707 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha); 1708 qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1554 vha->host_no, vha->vp_idx, vha)); 1743 vha->host_no, vha->vp_idx, vha));
1555 } 1744 }
1556 1745
1557 if (ha->mqenable) { 1746 if (vha->req->id && !ql2xmultique_tag) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) 1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha, 1748 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n"); 1749 "Queue delete failed.\n");
1561 } 1750 }
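The new xgmac_stats and dcbx_tlv nodes above share one shape: a read-only binary sysfs attribute whose backing store is a page of DMA-coherent memory, allocated lazily on the first read, filled by a mailbox query (qla2x00_get_xgmac_stats() or qla2x00_get_dcbx_params()), and copied out to the reader using the length the firmware reports. A stripped-down sketch of that shape; my_hw, my_query() and my_stats_read() are illustrative stand-ins, and the bin_attribute plumbing that wires the callback into sysfs is omitted:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

#define MY_DATA_SIZE PAGE_SIZE

struct my_hw {
        struct device *dev;     /* &pdev->dev in the driver */
        void *data;             /* lazily allocated, reused for later reads */
        dma_addr_t data_dma;
};

/* Firmware/mailbox query filling the DMA buffer; assumed to exist elsewhere. */
int my_query(struct my_hw *hw, dma_addr_t dma, size_t len, u16 *actual);

static ssize_t my_stats_read(struct my_hw *hw, char *buf, loff_t off,
                             size_t count)
{
        u16 actual = 0;

        if (off != 0 || count > MY_DATA_SIZE)
                return 0;

        if (!hw->data) {
                /* First reader pays for the allocation; later reads reuse it. */
                hw->data = dma_alloc_coherent(hw->dev, MY_DATA_SIZE,
                                              &hw->data_dma, GFP_KERNEL);
                if (!hw->data)
                        return 0;
        }

        memset(hw->data, 0, MY_DATA_SIZE);
        if (my_query(hw, hw->data_dma, MY_DATA_SIZE, &actual))
                return 0;

        count = min_t(size_t, count, actual);
        memcpy(buf, hw->data, count);
        return count;
}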
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f1..4a990f4da4e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS; 149 int rval = QLA_SUCCESS;
150 uint32_t cnt; 150 uint32_t cnt;
151 151
152 if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
153 return rval;
154
155 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); 152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
156 for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && 153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
157 rval == QLA_SUCCESS; cnt--) { 155 rval == QLA_SUCCESS; cnt--) {
158 if (cnt) 156 if (cnt)
159 udelay(100); 157 udelay(100);
@@ -351,7 +349,7 @@ static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{ 350{
353 uint32_t cnt, que_idx; 351 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt; 352 uint8_t que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr; 353 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg; 354 struct device_reg_25xxmq __iomem *reg;
357 355
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365 363
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 365 ha->max_req_queues : ha->max_rsp_queues;
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt); 366 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) { 367 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *) 368 reg = (struct device_reg_25xxmq *) ((void *)
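The qla24xx_pause_risc() change above stops keying off the HCCR pause bit (which the same series deletes from qla_fw.h) and instead always writes HCCRX_SET_RISC_PAUSE, then polls host_status for HSRX_RISC_PAUSED. Underneath it is a bounded poll-with-delay loop; a generic sketch in which readl() and udelay() are the real kernel APIs, the 100 microsecond spacing follows the driver's loop, and the helper name is made up:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll @reg until (readl(reg) & mask) is set, at most @attempts * 100us. */
static int poll_reg_set(void __iomem *reg, u32 mask, unsigned int attempts)
{
        while (attempts--) {
                if (readl(reg) & mask)
                        return 0;
                udelay(100);
        }
        return -ETIMEDOUT;
}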
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e..00aa48d975a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
93#define LSD(x) ((uint32_t)((uint64_t)(x))) 93#define LSD(x) ((uint32_t)((uint64_t)(x)))
94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
95 95
96#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
96 97
97/* 98/*
98 * I/O register 99 * I/O register
@@ -179,6 +180,7 @@
179#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 180#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
180#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
181#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
182 184
183struct req_que; 185struct req_que;
184 186
@@ -186,7 +188,6 @@ struct req_que;
186 * SCSI Request Block 188 * SCSI Request Block
187 */ 189 */
188typedef struct srb { 190typedef struct srb {
189 struct req_que *que;
190 struct fc_port *fcport; 191 struct fc_port *fcport;
191 192
192 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
2008#define VP_RET_CODE_NOT_FOUND 6 2009#define VP_RET_CODE_NOT_FOUND 6
2009 2010
2010struct qla_hw_data; 2011struct qla_hw_data;
2011 2012struct rsp_que;
2012/* 2013/*
2013 * ISP operations 2014 * ISP operations
2014 */ 2015 */
@@ -2030,10 +2031,9 @@ struct isp_operations {
2030 void (*enable_intrs) (struct qla_hw_data *); 2031 void (*enable_intrs) (struct qla_hw_data *);
2031 void (*disable_intrs) (struct qla_hw_data *); 2032 void (*disable_intrs) (struct qla_hw_data *);
2032 2033
2033 int (*abort_command) (struct scsi_qla_host *, srb_t *, 2034 int (*abort_command) (srb_t *);
2034 struct req_que *); 2035 int (*target_reset) (struct fc_port *, unsigned int, int);
2035 int (*target_reset) (struct fc_port *, unsigned int); 2036 int (*lun_reset) (struct fc_port *, unsigned int, int);
2036 int (*lun_reset) (struct fc_port *, unsigned int);
2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
2038 uint8_t, uint8_t, uint16_t *, uint8_t); 2038 uint8_t, uint8_t, uint16_t *, uint8_t);
2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, 2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
2079#define QLA_PCI_MSIX_CONTROL 0xa2 2079#define QLA_PCI_MSIX_CONTROL 0xa2
2080 2080
2081struct scsi_qla_host; 2081struct scsi_qla_host;
2082struct rsp_que;
2083 2082
2084struct qla_msix_entry { 2083struct qla_msix_entry {
2085 int have_irq; 2084 int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
2140#define MBC_INITIALIZE_MULTIQ 0x1f 2139#define MBC_INITIALIZE_MULTIQ 0x1f
2141#define QLA_QUE_PAGE 0X1000 2140#define QLA_QUE_PAGE 0X1000
2142#define QLA_MQ_SIZE 32 2141#define QLA_MQ_SIZE 32
2143#define QLA_MAX_HOST_QUES 16
2144#define QLA_MAX_QUEUES 256 2142#define QLA_MAX_QUEUES 256
2145#define ISP_QUE_REG(ha, id) \ 2143#define ISP_QUE_REG(ha, id) \
2146 ((ha->mqenable) ? \ 2144 ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
2170 struct qla_hw_data *hw; 2168 struct qla_hw_data *hw;
2171 struct qla_msix_entry *msix; 2169 struct qla_msix_entry *msix;
2172 struct req_que *req; 2170 struct req_que *req;
2171 srb_t *status_srb; /* status continuation entry */
2172 struct work_struct q_work;
2173}; 2173};
2174 2174
2175/* Request queue data structure */ 2175/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
2222 uint32_t fce_enabled :1; 2222 uint32_t fce_enabled :1;
2223 uint32_t fac_supported :1; 2223 uint32_t fac_supported :1;
2224 uint32_t chip_reset_done :1; 2224 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1;
2225 } flags; 2227 } flags;
2226 2228
2227 /* This spinlock is used to protect "io transactions", you must 2229 /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
2246 struct rsp_que **rsp_q_map; 2248 struct rsp_que **rsp_q_map;
2247 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2249 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2248 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2250 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2249 uint16_t max_queues; 2251 uint8_t max_req_queues;
2252 uint8_t max_rsp_queues;
2250 struct qla_npiv_entry *npiv_info; 2253 struct qla_npiv_entry *npiv_info;
2251 uint16_t nvram_npiv_size; 2254 uint16_t nvram_npiv_size;
2252 2255
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
2255#define FLOGI_MID_SUPPORT BIT_10 2258#define FLOGI_MID_SUPPORT BIT_10
2256#define FLOGI_VSAN_SUPPORT BIT_12 2259#define FLOGI_VSAN_SUPPORT BIT_12
2257#define FLOGI_SP_SUPPORT BIT_13 2260#define FLOGI_SP_SUPPORT BIT_13
2261
2262 uint8_t port_no; /* Physical port of adapter */
2263
2258 /* Timeout timers. */ 2264 /* Timeout timers. */
2259 uint8_t loop_down_abort_time; /* port down timer */ 2265 uint8_t loop_down_abort_time; /* port down timer */
2260 atomic_t loop_down_timer; /* loop down timer */ 2266 atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
2392 dma_addr_t edc_data_dma; 2398 dma_addr_t edc_data_dma;
2393 uint16_t edc_data_len; 2399 uint16_t edc_data_len;
2394 2400
2401#define XGMAC_DATA_SIZE PAGE_SIZE
2402 void *xgmac_data;
2403 dma_addr_t xgmac_data_dma;
2404
2405#define DCBX_TLV_DATA_SIZE PAGE_SIZE
2406 void *dcbx_tlv;
2407 dma_addr_t dcbx_tlv_dma;
2408
2395 struct task_struct *dpc_thread; 2409 struct task_struct *dpc_thread;
2396 uint8_t dpc_active; /* DPC routine is active */ 2410 uint8_t dpc_active; /* DPC routine is active */
2397 2411
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
2510 uint32_t flt_region_vpd; 2524 uint32_t flt_region_vpd;
2511 uint32_t flt_region_nvram; 2525 uint32_t flt_region_nvram;
2512 uint32_t flt_region_npiv_conf; 2526 uint32_t flt_region_npiv_conf;
2527 uint32_t flt_region_gold_fw;
2513 2528
2514 /* Needed for BEACON */ 2529 /* Needed for BEACON */
2515 uint16_t beacon_blink_led; 2530 uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
2536 struct qla_chip_state_84xx *cs84xx; 2551 struct qla_chip_state_84xx *cs84xx;
2537 struct qla_statistics qla_stats; 2552 struct qla_statistics qla_stats;
2538 struct isp_operations *isp_ops; 2553 struct isp_operations *isp_ops;
2554 struct workqueue_struct *wq;
2539}; 2555};
2540 2556
2541/* 2557/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
2545 struct list_head list; 2561 struct list_head list;
2546 struct list_head vp_fcports; /* list of fcports */ 2562 struct list_head vp_fcports; /* list of fcports */
2547 struct list_head work_list; 2563 struct list_head work_list;
2564 spinlock_t work_lock;
2565
2548 /* Commonly used flags and state information. */ 2566 /* Commonly used flags and state information. */
2549 struct Scsi_Host *host; 2567 struct Scsi_Host *host;
2550 unsigned long host_no; 2568 unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
2591#define SWITCH_FOUND BIT_0 2609#define SWITCH_FOUND BIT_0
2592#define DFLG_NO_CABLE BIT_1 2610#define DFLG_NO_CABLE BIT_1
2593 2611
2594 srb_t *status_srb; /* Status continuation entry. */
2595
2596 /* ISP configuration data. */ 2612 /* ISP configuration data. */
2597 uint16_t loop_id; /* Host adapter loop id */ 2613 uint16_t loop_id; /* Host adapter loop id */
2598 2614
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
2618 uint8_t node_name[WWN_SIZE]; 2634 uint8_t node_name[WWN_SIZE];
2619 uint8_t port_name[WWN_SIZE]; 2635 uint8_t port_name[WWN_SIZE];
2620 uint8_t fabric_node_name[WWN_SIZE]; 2636 uint8_t fabric_node_name[WWN_SIZE];
2637
2638 uint16_t fcoe_vlan_id;
2639 uint16_t fcoe_fcf_idx;
2640 uint8_t fcoe_vn_port_mac[6];
2641
2621 uint32_t vp_abort_cnt; 2642 uint32_t vp_abort_cnt;
2622 2643
2623 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2644 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
2643#define VP_ERR_FAB_LOGOUT 4 2664#define VP_ERR_FAB_LOGOUT 4
2644#define VP_ERR_ADAP_NORESOURCES 5 2665#define VP_ERR_ADAP_NORESOURCES 5
2645 struct qla_hw_data *hw; 2666 struct qla_hw_data *hw;
2646 int req_ques[QLA_MAX_HOST_QUES]; 2667 struct req_que *req;
2647} scsi_qla_host_t; 2668} scsi_qla_host_t;
2648 2669
2649/* 2670/*
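Among the qla_def.h additions, MAKE_HANDLE() composes the 32-bit command handle from two 16-bit halves; in the multiqueue rework this appears to let the upper half carry the originating request-queue id while the lower half remains the per-queue index. A two-assert sanity check of the bit layout (plain userspace C; queue id 3 and index 0x2a are arbitrary):

#include <assert.h>
#include <stdint.h>

#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))

int main(void)
{
        uint32_t handle = MAKE_HANDLE(3, 0x2a);

        assert((handle >> 16) == 3);            /* high word: first argument */
        assert((handle & 0xffff) == 0x2a);      /* low word: second argument */
        return 0;
}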
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba..dfde2dd865c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
878 /* HCCR statuses. */ 878 /* HCCR statuses. */
879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ 879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ 880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
881#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
882 /* HCCR commands. */ 881 /* HCCR commands. */
883 /* NOOP. */ 882 /* NOOP. */
884#define HCCRX_NOOP 0x00000000 883#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
1241#define FLT_REG_HW_EVENT_1 0x1f 1240#define FLT_REG_HW_EVENT_1 0x1f
1242#define FLT_REG_NPIV_CONF_0 0x29 1241#define FLT_REG_NPIV_CONF_0 0x29
1243#define FLT_REG_NPIV_CONF_1 0x2a 1242#define FLT_REG_NPIV_CONF_1 0x2a
1243#define FLT_REG_GOLD_FW 0x2f
1244 1244
1245struct qla_flt_region { 1245struct qla_flt_region {
1246 uint32_t code; 1246 uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
1405#define MBC_IDC_ACK 0x101 1405#define MBC_IDC_ACK 0x101
1406#define MBC_RESTART_MPI_FW 0x3d 1406#define MBC_RESTART_MPI_FW 0x3d
1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ 1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
1408#define MBC_GET_XGMAC_STATS 0x7a
1409#define MBC_GET_DCBX_PARAMS 0x51
1408 1410
1409/* Flash access control option field bit definitions */ 1411/* Flash access control option field bit definitions */
1410#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1412#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
1711#define FA_VPD0_ADDR_81 0xD0000 1713#define FA_VPD0_ADDR_81 0xD0000
1712#define FA_VPD1_ADDR_81 0xD0400 1714#define FA_VPD1_ADDR_81 0xD0400
1713#define FA_NVRAM0_ADDR_81 0xD0080 1715#define FA_NVRAM0_ADDR_81 0xD0080
1714#define FA_NVRAM1_ADDR_81 0xD0480 1716#define FA_NVRAM1_ADDR_81 0xD0180
1715#define FA_FEATURE_ADDR_81 0xD4000 1717#define FA_FEATURE_ADDR_81 0xD4000
1716#define FA_FLASH_DESCR_ADDR_81 0xD8000 1718#define FA_FLASH_DESCR_ADDR_81 0xD8000
1717#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 1719#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed..65b12d82867 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
65extern int ql2xallocfwdump; 65extern int ql2xallocfwdump;
66extern int ql2xextended_error_logging; 66extern int ql2xextended_error_logging;
67extern int ql2xqfullrampup; 67extern int ql2xqfullrampup;
68extern int ql2xqfulltracking;
68extern int ql2xiidmaenable; 69extern int ql2xiidmaenable;
69extern int ql2xmaxqueues; 70extern int ql2xmaxqueues;
71extern int ql2xmultique_tag;
72extern int ql2xfwloadbin;
70 73
71extern int qla2x00_loop_reset(scsi_qla_host_t *); 74extern int qla2x00_loop_reset(scsi_qla_host_t *);
72extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 75extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
145extern int 148extern int
146qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 149qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
147 150
148extern void 151extern int
149qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, 152qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
150 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); 153 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
151 154
@@ -165,13 +168,13 @@ extern int
165qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 168qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
166 169
167extern int 170extern int
168qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 171qla2x00_abort_command(srb_t *);
169 172
170extern int 173extern int
171qla2x00_abort_target(struct fc_port *, unsigned int); 174qla2x00_abort_target(struct fc_port *, unsigned int, int);
172 175
173extern int 176extern int
174qla2x00_lun_reset(struct fc_port *, unsigned int); 177qla2x00_lun_reset(struct fc_port *, unsigned int, int);
175 178
176extern int 179extern int
177qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 180qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
236qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 239qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
237 dma_addr_t); 240 dma_addr_t);
238 241
239extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 242extern int qla24xx_abort_command(srb_t *);
240extern int qla24xx_abort_target(struct fc_port *, unsigned int); 243extern int
241extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 244qla24xx_abort_target(struct fc_port *, unsigned int, int);
245extern int
246qla24xx_lun_reset(struct fc_port *, unsigned int, int);
242 247
243extern int 248extern int
244qla2x00_system_error(scsi_qla_host_t *); 249qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
288extern int 293extern int
289qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); 294qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
290 295
296extern int
297qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
298
299extern int
300qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
301
302extern int
303qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
304
305extern int
306qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
307
291/* 308/*
292 * Global Function Prototypes in qla_isr.c source file. 309 * Global Function Prototypes in qla_isr.c source file.
293 */ 310 */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
295extern irqreturn_t qla2300_intr_handler(int, void *); 312extern irqreturn_t qla2300_intr_handler(int, void *);
296extern irqreturn_t qla24xx_intr_handler(int, void *); 313extern irqreturn_t qla24xx_intr_handler(int, void *);
297extern void qla2x00_process_response_queue(struct rsp_que *); 314extern void qla2x00_process_response_queue(struct rsp_que *);
298extern void qla24xx_process_response_queue(struct rsp_que *); 315extern void
299 316qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
300extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); 317extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
301extern void qla2x00_free_irqs(scsi_qla_host_t *); 318extern void qla2x00_free_irqs(scsi_qla_host_t *);
302 319
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
401extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); 418extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
402extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); 419extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
403extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, 420extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
404 uint16_t, uint8_t, uint8_t); 421 uint16_t, int, uint8_t);
405extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 422extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
406 uint16_t); 423 uint16_t, int);
407extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); 424extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
408extern void qla2x00_init_response_q_entries(struct rsp_que *); 425extern void qla2x00_init_response_q_entries(struct rsp_que *);
409extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 426extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
410extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); 427extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
411extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); 428extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
412extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); 429extern int qla25xx_delete_queues(struct scsi_qla_host *);
413extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 430extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
414extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 431extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
415extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 432extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
416extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 433extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
417extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 434extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
418extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 435extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
436extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
437
419#endif /* _QLA_GBL_H */ 438#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf8..917534b9f22 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1107 return ret; 1107 return ret;
1108 1108
1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1110 mb, BIT_1); 1110 mb, BIT_1|BIT_0);
1111 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1879 case BIT_13: 1879 case BIT_13:
1880 list[i].fp_speed = PORT_SPEED_4GB; 1880 list[i].fp_speed = PORT_SPEED_4GB;
1881 break; 1881 break;
1882 case BIT_12:
1883 list[i].fp_speed = PORT_SPEED_10GB;
1884 break;
1882 case BIT_11: 1885 case BIT_11:
1883 list[i].fp_speed = PORT_SPEED_8GB; 1886 list[i].fp_speed = PORT_SPEED_8GB;
1884 break; 1887 break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c064..26202612932 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
634 goto chip_diag_failed; 634 goto chip_diag_failed;
635 635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 ha->host_no)); 637 vha->host_no));
638 638
639 /* Reset RISC processor. */ 639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
655 goto chip_diag_failed; 655 goto chip_diag_failed;
656 656
657 /* Check product ID of chip */ 657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); 658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659 659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw; 730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0]; 731 struct req_que *req = ha->req_q_map[0];
732 732
733 /* Perform RISC reset. */
734 qla24xx_reset_risc(vha);
735
736 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
737 734
738 rval = qla2x00_mbx_reg_test(vha); 735 rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
786 sizeof(uint32_t); 783 sizeof(uint32_t);
787 if (ha->mqenable) 784 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 785 mq_size = sizeof(struct qla2xxx_mq_chain);
789
790 /* Allocate memory for Fibre Channel Event Buffer. */ 786 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 788 goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
850 rsp_q_size = rsp->length * sizeof(response_t); 846 rsp_q_size = rsp->length * sizeof(response_t);
851 847
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
854 eft_size;
855 ha->chain_offset = dump_size; 850 ha->chain_offset = dump_size;
856 dump_size += mq_size + fce_size; 851 dump_size += mq_size + fce_size;
857 852
@@ -891,6 +886,56 @@ cont_alloc:
891 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
892} 887}
893 888
889static int
890qla81xx_mpi_sync(scsi_qla_host_t *vha)
891{
892#define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935done:
936 return rval;
937}
938
894/** 939/**
895 * qla2x00_setup_chip() - Load and start RISC firmware. 940 * qla2x00_setup_chip() - Load and start RISC firmware.
896 * @ha: HA context 941 * @ha: HA context
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
915 spin_unlock_irqrestore(&ha->hardware_lock, flags); 960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 } 961 }
917 962
963 qla81xx_mpi_sync(vha);
964
918 /* Load firmware sequences */ 965 /* Load firmware sequences */
919 rval = ha->isp_ops->load_risc(vha, &srisc_address); 966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
920 if (rval == QLA_SUCCESS) { 967 if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
931 /* Retrieve firmware information. */ 978 /* Retrieve firmware information. */
932 if (rval == QLA_SUCCESS) { 979 if (rval == QLA_SUCCESS) {
933 fw_major_version = ha->fw_major_version; 980 fw_major_version = ha->fw_major_version;
934 qla2x00_get_fw_version(vha, 981 rval = qla2x00_get_fw_version(vha,
935 &ha->fw_major_version, 982 &ha->fw_major_version,
936 &ha->fw_minor_version, 983 &ha->fw_minor_version,
937 &ha->fw_subminor_version, 984 &ha->fw_subminor_version,
938 &ha->fw_attributes, &ha->fw_memory_size, 985 &ha->fw_attributes, &ha->fw_memory_size,
939 ha->mpi_version, &ha->mpi_capabilities, 986 ha->mpi_version, &ha->mpi_capabilities,
940 ha->phy_version); 987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
941 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
942 if (IS_QLA2XXX_MIDTYPE(ha) && 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
943 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
989 ha->fw_subminor_version); 1039 ha->fw_subminor_version);
990 } 1040 }
991 } 1041 }
992 1042failed:
993 if (rval) { 1043 if (rval) {
994 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
995 vha->host_no)); 1045 vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
1013 uint16_t cnt; 1063 uint16_t cnt;
1014 response_t *pkt; 1064 response_t *pkt;
1015 1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1016 pkt = rsp->ring_ptr; 1069 pkt = rsp->ring_ptr;
1017 for (cnt = 0; cnt < rsp->length; cnt++) { 1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1018 pkt->signature = RESPONSE_PROCESSED; 1071 pkt->signature = RESPONSE_PROCESSED;
1019 pkt++; 1072 pkt++;
1020 } 1073 }
1021
1022} 1074}
1023 1075
1024/** 1076/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1176 if (ha->flags.msix_enabled) { 1228 if (ha->flags.msix_enabled) {
1177 msix = &ha->msix_entries[1]; 1229 msix = &ha->msix_entries[1];
1178 DEBUG2_17(printk(KERN_INFO 1230 DEBUG2_17(printk(KERN_INFO
1179 "Reistering vector 0x%x for base que\n", msix->entry)); 1231 "Registering vector 0x%x for base que\n", msix->entry));
1180 icb->msix = cpu_to_le16(msix->entry); 1232 icb->msix = cpu_to_le16(msix->entry);
1181 } 1233 }
1182 /* Use alternate PCI bus number */ 1234 /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 1283
1232 /* Clear outstanding commands array. */ 1284 /* Clear outstanding commands array. */
1233 for (que = 0; que < ha->max_queues; que++) { 1285 for (que = 0; que < ha->max_req_queues; que++) {
1234 req = ha->req_q_map[que]; 1286 req = ha->req_q_map[que];
1235 if (!req) 1287 if (!req)
1236 continue; 1288 continue;
1237 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1238 req->outstanding_cmds[cnt] = NULL; 1290 req->outstanding_cmds[cnt] = NULL;
1239 1291
1240 req->current_outstanding_cmd = 0; 1292 req->current_outstanding_cmd = 1;
1241 1293
1242 /* Initialize firmware. */ 1294 /* Initialize firmware. */
1243 req->ring_ptr = req->ring; 1295 req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1245 req->cnt = req->length; 1297 req->cnt = req->length;
1246 } 1298 }
1247 1299
1248 for (que = 0; que < ha->max_queues; que++) { 1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1249 rsp = ha->rsp_q_map[que]; 1301 rsp = ha->rsp_q_map[que];
1250 if (!rsp) 1302 if (!rsp)
1251 continue; 1303 continue;
1252 rsp->ring_ptr = rsp->ring;
1253 rsp->ring_index = 0;
1254
1255 /* Initialize response queue entries */ 1304 /* Initialize response queue entries */
1256 qla2x00_init_response_q_entries(rsp); 1305 qla2x00_init_response_q_entries(rsp);
1257 } 1306 }
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1307 unsigned long wtime, mtime, cs84xx_time; 1356 unsigned long wtime, mtime, cs84xx_time;
1308 uint16_t min_wait; /* Minimum wait time if loop is down */ 1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1309 uint16_t wait_time; /* Wait time if loop is coming ready */ 1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1310 uint16_t state[3]; 1359 uint16_t state[5];
1311 struct qla_hw_data *ha = vha->hw; 1360 struct qla_hw_data *ha = vha->hw;
1312 1361
1313 rval = QLA_SUCCESS; 1362 rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1406 vha->host_no, state[0], jiffies)); 1455 vha->host_no, state[0], jiffies));
1407 } while (1); 1456 } while (1);
1408 1457
1409 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1410 vha->host_no, state[0], jiffies)); 1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1411 1461
1412 if (rval) { 1462 if (rval) {
1413 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1541 char *st, *en; 1591 char *st, *en;
1542 uint16_t index; 1592 uint16_t index;
1543 struct qla_hw_data *ha = vha->hw; 1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1544 1595
1545 if (memcmp(model, BINZERO, len) != 0) { 1596 if (memcmp(model, BINZERO, len) != 0) {
1546 strncpy(ha->model_number, model, len); 1597 strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 } 1604 }
1554 1605
1555 index = (ha->pdev->subsystem_device & 0xff); 1606 index = (ha->pdev->subsystem_device & 0xff);
1556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1557 index < QLA_MODEL_NAMES) 1609 index < QLA_MODEL_NAMES)
1558 strncpy(ha->model_desc, 1610 strncpy(ha->model_desc,
1559 qla2x00_model_name[index * 2 + 1], 1611 qla2x00_model_name[index * 2 + 1],
1560 sizeof(ha->model_desc) - 1); 1612 sizeof(ha->model_desc) - 1);
1561 } else { 1613 } else {
1562 index = (ha->pdev->subsystem_device & 0xff); 1614 index = (ha->pdev->subsystem_device & 0xff);
1563 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1564 index < QLA_MODEL_NAMES) { 1617 index < QLA_MODEL_NAMES) {
1565 strcpy(ha->model_number, 1618 strcpy(ha->model_number,
1566 qla2x00_model_name[index * 2]); 1619 qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2066 } 2121 }
2067 2122
2068 return (rval); 2123 return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2110 goto cleanup_allocation; 2165 goto cleanup_allocation;
2111 2166
2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2113 ha->host_no, entries)); 2168 vha->host_no, entries));
2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2115 entries * sizeof(struct gid_list_info))); 2170 entries * sizeof(struct gid_list_info)));
2116 2171
@@ -2243,7 +2298,8 @@ static void
2243qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2298qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2244{ 2299{
2245#define LS_UNKNOWN 2 2300#define LS_UNKNOWN 2
2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2247 int rval; 2303 int rval;
2248 uint16_t mb[6]; 2304 uint16_t mb[6];
2249 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2266 fcport->port_name[6], fcport->port_name[7], rval, 2322 fcport->port_name[6], fcport->port_name[7], rval,
2267 fcport->fp_speed, mb[0], mb[1])); 2323 fcport->fp_speed, mb[0], mb[1]));
2268 } else { 2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2269 DEBUG2(qla_printk(KERN_INFO, ha, 2330 DEBUG2(qla_printk(KERN_INFO, ha,
2270 "iIDMA adjusted to %s GB/s on " 2331 "iIDMA adjusted to %s GB/s on "
2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2333 link_speed, fcport->port_name[0],
2273 fcport->port_name[1], fcport->port_name[2], 2334 fcport->port_name[1], fcport->port_name[2],
2274 fcport->port_name[3], fcport->port_name[4], 2335 fcport->port_name[3], fcport->port_name[4],
2275 fcport->port_name[5], fcport->port_name[6], 2336 fcport->port_name[5], fcport->port_name[6],
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3180{ 3241{
3181 int rval = QLA_SUCCESS; 3242 int rval = QLA_SUCCESS;
3182 uint32_t wait_time; 3243 uint32_t wait_time;
3183 struct qla_hw_data *ha = vha->hw; 3244 struct req_que *req;
3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3245 struct rsp_que *rsp;
3185 struct rsp_que *rsp = req->rsp; 3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3186 3252
3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3448 int ret = -1; 3514 int ret = -1;
3449 int i; 3515 int i;
3450 3516
3451 for (i = 1; i < ha->max_queues; i++) { 3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3452 rsp = ha->rsp_q_map[i]; 3518 rsp = ha->rsp_q_map[i];
3453 if (rsp) { 3519 if (rsp) {
3454 rsp->options &= ~BIT_0; 3520 rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3462 "%s Rsp que:%d inited\n", __func__, 3528 "%s Rsp que:%d inited\n", __func__,
3463 rsp->id)); 3529 rsp->id));
3464 } 3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3465 req = ha->req_q_map[i]; 3533 req = ha->req_q_map[i];
3466 if (req) { 3534 if (req) {
3467 /* Clear outstanding commands array. */ 3535 /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3566 nv = ha->nvram; 3634 nv = ha->nvram;
3567 3635
3568 /* Determine NVRAM starting address. */ 3636 /* Determine NVRAM starting address. */
3569 ha->nvram_size = sizeof(struct nvram_24xx); 3637 if (ha->flags.port0) {
3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3640 } else {
3573 if (PCI_FUNC(ha->pdev->devfn)) {
3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3576 } 3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3577 3646
3578 /* Get VPD data into cache */ 3647 /* Get VPD data into cache */
3579 ha->vpd = ha->nvram + VPD_OFFSET; 3648 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3588 chksum += le32_to_cpu(*dptr++); 3657 chksum += le32_to_cpu(*dptr++);
3589 3658
3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3592 3661
3593 /* Bad NVRAM data, set defaults parameters. */ 3662 /* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3612 nv->exchange_count = __constant_cpu_to_le16(0); 3681 nv->exchange_count = __constant_cpu_to_le16(0);
3613 nv->hard_address = __constant_cpu_to_le16(124); 3682 nv->hard_address = __constant_cpu_to_le16(124);
3614 nv->port_name[0] = 0x21; 3683 nv->port_name[0] = 0x21;
3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3684 nv->port_name[1] = 0x00 + ha->port_no;
3616 nv->port_name[2] = 0x00; 3685 nv->port_name[2] = 0x00;
3617 nv->port_name[3] = 0xe0; 3686 nv->port_name[3] = 0xe0;
3618 nv->port_name[4] = 0x8b; 3687 nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3798} 3867}
3799 3868
3800static int 3869static int
3801qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3870qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3802{ 3872{
3803 int rval = QLA_SUCCESS; 3873 int rval = QLA_SUCCESS;
3804 int segments, fragment; 3874 int segments, fragment;
3805 uint32_t faddr;
3806 uint32_t *dcode, dlen; 3875 uint32_t *dcode, dlen;
3807 uint32_t risc_addr; 3876 uint32_t risc_addr;
3808 uint32_t risc_size; 3877 uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3811 struct req_que *req = ha->req_q_map[0]; 3880 struct req_que *req = ha->req_q_map[0];
3812 3881
3813 qla_printk(KERN_INFO, ha, 3882 qla_printk(KERN_INFO, ha,
3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3883 "FW: Loading from flash (%x)...\n", faddr);
3815 3884
3816 rval = QLA_SUCCESS; 3885 rval = QLA_SUCCESS;
3817 3886
3818 segments = FA_RISC_CODE_SEGMENTS; 3887 segments = FA_RISC_CODE_SEGMENTS;
3819 faddr = ha->flt_region_fw;
3820 dcode = (uint32_t *)req->ring; 3888 dcode = (uint32_t *)req->ring;
3821 *srisc_addr = 0; 3889 *srisc_addr = 0;
3822 3890
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4104{ 4172{
4105 int rval; 4173 int rval;
4106 4174
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4107 /* 4178 /*
4108 * FW Load priority: 4179 * FW Load priority:
4109 * 1) Firmware via request-firmware interface (.bin file). 4180 * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4113 if (rval == QLA_SUCCESS) 4184 if (rval == QLA_SUCCESS)
4114 return rval; 4185 return rval;
4115 4186
4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4117} 4189}
4118 4190
4119int 4191int
4120qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4192qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4121{ 4193{
4122 int rval; 4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4123 4199
4124 /* 4200 /*
4125 * FW Load priority: 4201 * FW Load priority:
4126 * 1) Firmware residing in flash. 4202 * 1) Firmware residing in flash.
4127 * 2) Firmware via request-firmware interface (.bin file). 4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4128 */ 4205 */
4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4130 if (rval == QLA_SUCCESS) 4207 if (rval == QLA_SUCCESS)
4131 return rval; 4208 return rval;
4132 4209
4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4210try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4134} 4226}
4135 4227
4136void 4228void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4146 4238
4147 ret = qla2x00_stop_firmware(vha); 4239 ret = qla2x00_stop_firmware(vha);
4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4149 retries ; retries--) { 4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4150 ha->isp_ops->reset_chip(vha); 4242 ha->isp_ops->reset_chip(vha);
4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4152 continue; 4244 continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4166 struct qla_hw_data *ha = vha->hw; 4258 struct qla_hw_data *ha = vha->hw;
4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4260 struct req_que *req;
4169 struct rsp_que *rsp = req->rsp; 4261 struct rsp_que *rsp;
4170 4262
4171 if (!vha->vp_idx) 4263 if (!vha->vp_idx)
4172 return -EINVAL; 4264 return -EINVAL;
4173 4265
4174 rval = qla2x00_fw_ready(base_vha); 4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4175 if (rval == QLA_SUCCESS) { 4273 if (rval == QLA_SUCCESS) {
4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4305 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306 chksum += le32_to_cpu(*dptr++); 4404 chksum += le32_to_cpu(*dptr++);
4307 4405
4308 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 4408
4311 /* Bad NVRAM data, set defaults parameters. */ 4409 /* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4330 nv->exchange_count = __constant_cpu_to_le16(0); 4428 nv->exchange_count = __constant_cpu_to_le16(0);
4331 nv->port_name[0] = 0x21; 4429 nv->port_name[0] = 0x21;
4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4430 nv->port_name[1] = 0x00 + ha->port_no;
4333 nv->port_name[2] = 0x00; 4431 nv->port_name[2] = 0x00;
4334 nv->port_name[3] = 0xe0; 4432 nv->port_name[3] = 0xe0;
4335 nv->port_name[4] = 0x8b; 4433 nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4361 nv->enode_mac[0] = 0x01; 4459 nv->enode_mac[0] = 0x00;
4362 nv->enode_mac[1] = 0x02; 4460 nv->enode_mac[1] = 0x02;
4363 nv->enode_mac[2] = 0x03; 4461 nv->enode_mac[2] = 0x03;
4364 nv->enode_mac[3] = 0x04; 4462 nv->enode_mac[3] = 0x04;
4365 nv->enode_mac[4] = 0x05; 4463 nv->enode_mac[4] = 0x05;
4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4367 4465
4368 rval = 1; 4466 rval = 1;
4369 } 4467 }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4396 icb->enode_mac[2] = 0x03; 4494 icb->enode_mac[2] = 0x03;
4397 icb->enode_mac[3] = 0x04; 4495 icb->enode_mac[3] = 0x04;
4398 icb->enode_mac[4] = 0x05; 4496 icb->enode_mac[4] = 0x05;
4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4400 } 4498 }
4401 4499
4402 /* Use extended-initialization control block. */ 4500 /* Use extended-initialization control block. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730..13396beae2c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 17
18static void qla25xx_set_que(srb_t *, struct rsp_que **);
18/** 19/**
19 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 20 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * @cmd: SCSI command 21 * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
92 * Returns a pointer to the Continuation Type 0 IOCB packet. 93 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */ 94 */
94static inline cont_entry_t * 95static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) 96qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{ 97{
97 cont_entry_t *cont_pkt; 98 cont_entry_t *cont_pkt;
99 struct req_que *req = vha->req;
98 /* Adjust ring index. */ 100 /* Adjust ring index. */
99 req->ring_index++; 101 req->ring_index++;
100 if (req->ring_index == req->length) { 102 if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 122 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 123 */
122static inline cont_a64_entry_t * 124static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) 125qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 126{
125 cont_a64_entry_t *cont_pkt; 127 cont_a64_entry_t *cont_pkt;
126 128
129 struct req_que *req = vha->req;
127 /* Adjust ring index. */ 130 /* Adjust ring index. */
128 req->ring_index++; 131 req->ring_index++;
129 if (req->ring_index == req->length) { 132 if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
159 struct scsi_cmnd *cmd; 162 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 163 struct scatterlist *sg;
161 int i; 164 int i;
162 struct req_que *req;
163 165
164 cmd = sp->cmd; 166 cmd = sp->cmd;
165 167
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
174 } 176 }
175 177
176 vha = sp->fcport->vha; 178 vha = sp->fcport->vha;
177 req = sp->que;
178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180 180
181 /* Three DSDs are available in the Command Type 2 IOCB */ 181 /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB. 193 * Type 0 IOCB.
194 */ 194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7; 197 avail_dsds = 7;
198 } 198 }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
220 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
221 struct scatterlist *sg; 221 struct scatterlist *sg;
222 int i; 222 int i;
223 struct req_que *req;
224 223
225 cmd = sp->cmd; 224 cmd = sp->cmd;
226 225
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
235 } 234 }
236 235
237 vha = sp->fcport->vha; 236 vha = sp->fcport->vha;
238 req = sp->que;
239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
241 238
242 /* Two DSDs are available in the Command Type 3 IOCB */ 239 /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
254 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
255 * Type 1 IOCB. 252 * Type 1 IOCB.
256 */ 253 */
257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
259 avail_dsds = 5; 256 avail_dsds = 5;
260 } 257 }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 350 /* Build command packet */
354 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
356 sp->que = req;
357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
358 req->cnt -= req_cnt; 354 req->cnt -= req_cnt;
359 355
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
453 mrk24->lun[2] = MSB(lun); 449 mrk24->lun[2] = MSB(lun);
454 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 450 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
455 mrk24->vp_index = vha->vp_idx; 451 mrk24->vp_index = vha->vp_idx;
452 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
456 } else { 453 } else {
457 SET_TARGET_ID(ha, mrk->target, loop_id); 454 SET_TARGET_ID(ha, mrk->target, loop_id);
458 mrk->lun = cpu_to_le16(lun); 455 mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
531 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) 528 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
532 *dword_ptr++ = 0; 529 *dword_ptr++ = 0;
533 530
534 /* Set system defined field. */
535 pkt->sys_define = (uint8_t)req->ring_index;
536
537 /* Set entry count. */ 531 /* Set entry count. */
538 pkt->entry_count = 1; 532 pkt->entry_count = 1;
539 533
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 } 650 }
657 651
658 vha = sp->fcport->vha; 652 vha = sp->fcport->vha;
659 req = sp->que; 653 req = vha->req;
660 654
661 /* Set transfer direction */ 655 /* Set transfer direction */
662 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 656 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
687 * Five DSDs are available in the Continuation 681 * Five DSDs are available in the Continuation
688 * Type 1 IOCB. 682 * Type 1 IOCB.
689 */ 683 */
690 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 684 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
691 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 685 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
692 avail_dsds = 5; 686 avail_dsds = 5;
693 } 687 }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
724 struct scsi_cmnd *cmd = sp->cmd; 718 struct scsi_cmnd *cmd = sp->cmd;
725 struct scsi_qla_host *vha = sp->fcport->vha; 719 struct scsi_qla_host *vha = sp->fcport->vha;
726 struct qla_hw_data *ha = vha->hw; 720 struct qla_hw_data *ha = vha->hw;
727 uint16_t que_id;
728 721
729 /* Setup device pointers. */ 722 /* Setup device pointers. */
730 ret = 0; 723 ret = 0;
731 que_id = vha->req_ques[0];
732 724
733 req = ha->req_q_map[que_id]; 725 qla25xx_set_que(sp, &rsp);
734 sp->que = req; 726 req = vha->req;
735 727
736 if (req->rsp)
737 rsp = req->rsp;
738 else
739 rsp = ha->rsp_q_map[que_id];
740 /* So we know we haven't pci_map'ed anything yet */ 728 /* So we know we haven't pci_map'ed anything yet */
741 tot_dsds = 0; 729 tot_dsds = 0;
742 730
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
794 req->cnt -= req_cnt; 782 req->cnt -= req_cnt;
795 783
796 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 784 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
797 cmd_pkt->handle = handle; 785 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
798 786
799 /* Zero out remaining portion of packet. */ 787 /* Zero out remaining portion of packet. */
800 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 788 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
823 811
824 /* Set total data segment count. */ 812 /* Set total data segment count. */
825 cmd_pkt->entry_count = (uint8_t)req_cnt; 813 cmd_pkt->entry_count = (uint8_t)req_cnt;
814 /* Specify response queue number where completion should happen */
815 cmd_pkt->entry_status = (uint8_t) rsp->id;
826 wmb(); 816 wmb();
827 817
828 /* Adjust ring index. */ 818 /* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
842 /* Manage unprocessed RIO/ZIO commands in response queue. */ 832 /* Manage unprocessed RIO/ZIO commands in response queue. */
843 if (vha->flags.process_response_queue && 833 if (vha->flags.process_response_queue &&
844 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 834 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
845 qla24xx_process_response_queue(rsp); 835 qla24xx_process_response_queue(vha, rsp);
846 836
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848 return QLA_SUCCESS; 838 return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
855 845
856 return QLA_FUNCTION_FAILED; 846 return QLA_FUNCTION_FAILED;
857} 847}
848
849static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
850{
851 struct scsi_cmnd *cmd = sp->cmd;
852 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu;
854
855 if (ql2xmultique_tag && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1];
858 else
859 *rsp = ha->rsp_q_map[0];
860}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e5..c8d0a176fea 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *); 18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20 19
21/** 20/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 21 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
51 status = 0; 50 status = 0;
52 51
53 spin_lock(&ha->hardware_lock); 52 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp); 53 vha = pci_get_drvdata(ha->pdev);
55 for (iter = 50; iter--; ) { 54 for (iter = 50; iter--; ) {
56 hccr = RD_REG_WORD(&reg->hccr); 55 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) { 56 if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
147 status = 0; 146 status = 0;
148 147
149 spin_lock(&ha->hardware_lock); 148 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp); 149 vha = pci_get_drvdata(ha->pdev);
151 for (iter = 50; iter--; ) { 150 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 151 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) { 152 if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
685 vha->host_no)); 684 vha->host_no));
686 685
687 if (IS_FWI2_CAPABLE(ha)) 686 if (IS_FWI2_CAPABLE(ha))
688 qla24xx_process_response_queue(rsp); 687 qla24xx_process_response_queue(vha, rsp);
689 else 688 else
690 qla2x00_process_response_queue(rsp); 689 qla2x00_process_response_queue(rsp);
691 break; 690 break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
766 struct qla_hw_data *ha = vha->hw; 765 struct qla_hw_data *ha = vha->hw;
767 struct req_que *req = NULL; 766 struct req_que *req = NULL;
768 767
769 req = ha->req_q_map[vha->req_ques[0]]; 768 if (!ql2xqfulltracking)
769 return;
770
771 req = vha->req;
770 if (!req) 772 if (!req)
771 return; 773 return;
772 if (req->max_q_depth <= sdev->queue_depth) 774 if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
808 fc_port_t *fcport; 810 fc_port_t *fcport;
809 struct scsi_device *sdev; 811 struct scsi_device *sdev;
810 812
813 if (!ql2xqfulltracking)
814 return;
815
811 sdev = sp->cmd->device; 816 sdev = sp->cmd->device;
812 if (sdev->queue_depth >= req->max_q_depth) 817 if (sdev->queue_depth >= req->max_q_depth)
813 return; 818 return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
858 qla2x00_ramp_up_queue_depth(vha, req, sp); 863 qla2x00_ramp_up_queue_depth(vha, req, sp);
859 qla2x00_sp_compl(ha, sp); 864 qla2x00_sp_compl(ha, sp);
860 } else { 865 } else {
861 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 866 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
862 vha->host_no)); 867 " handle(%d)\n", vha->host_no, req->id, index));
863 qla_printk(KERN_WARNING, ha, 868 qla_printk(KERN_WARNING, ha,
864 "Invalid ISP SCSI completion handle\n"); 869 "Invalid ISP SCSI completion handle\n");
865 870
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
881 uint16_t handle_cnt; 886 uint16_t handle_cnt;
882 uint16_t cnt; 887 uint16_t cnt;
883 888
884 vha = qla2x00_get_rsp_host(rsp); 889 vha = pci_get_drvdata(ha->pdev);
885 890
886 if (!vha->flags.online) 891 if (!vha->flags.online)
887 return; 892 return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
926 } 931 }
927 break; 932 break;
928 case STATUS_CONT_TYPE: 933 case STATUS_CONT_TYPE:
929 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 934 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
930 break; 935 break;
931 default: 936 default:
932 /* Type Not Supported. */ 937 /* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
945} 950}
946 951
947static inline void 952static inline void
948qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) 953qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
954 struct rsp_que *rsp)
949{ 955{
950 struct scsi_cmnd *cp = sp->cmd; 956 struct scsi_cmnd *cp = sp->cmd;
951 957
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
962 sp->request_sense_ptr += sense_len; 968 sp->request_sense_ptr += sense_len;
963 sp->request_sense_length -= sense_len; 969 sp->request_sense_length -= sense_len;
964 if (sp->request_sense_length != 0) 970 if (sp->request_sense_length != 0)
965 sp->fcport->vha->status_srb = sp; 971 rsp->status_srb = sp;
966 972
967 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 973 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
968 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 974 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 998 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
993 uint8_t *rsp_info, *sense_data; 999 uint8_t *rsp_info, *sense_data;
994 struct qla_hw_data *ha = vha->hw; 1000 struct qla_hw_data *ha = vha->hw;
995 struct req_que *req = rsp->req; 1001 uint32_t handle;
1002 uint16_t que;
1003 struct req_que *req;
996 1004
997 sts = (sts_entry_t *) pkt; 1005 sts = (sts_entry_t *) pkt;
998 sts24 = (struct sts_entry_24xx *) pkt; 1006 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1003 comp_status = le16_to_cpu(sts->comp_status); 1011 comp_status = le16_to_cpu(sts->comp_status);
1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1012 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1005 } 1013 }
1006 1014 handle = (uint32_t) LSW(sts->handle);
1015 que = MSW(sts->handle);
1016 req = ha->req_q_map[que];
1007 /* Fast path completion. */ 1017 /* Fast path completion. */
1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1018 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1009 qla2x00_process_completed_request(vha, req, sts->handle); 1019 qla2x00_process_completed_request(vha, req, handle);
1010 1020
1011 return; 1021 return;
1012 } 1022 }
1013 1023
1014 /* Validate handle. */ 1024 /* Validate handle. */
1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1025 if (handle < MAX_OUTSTANDING_COMMANDS) {
1016 sp = req->outstanding_cmds[sts->handle]; 1026 sp = req->outstanding_cmds[handle];
1017 req->outstanding_cmds[sts->handle] = NULL; 1027 req->outstanding_cmds[handle] = NULL;
1018 } else 1028 } else
1019 sp = NULL; 1029 sp = NULL;
1020 1030
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1030 cp = sp->cmd; 1040 cp = sp->cmd;
1031 if (cp == NULL) { 1041 if (cp == NULL) {
1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1042 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1043 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1034 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1045 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1036 1046
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1121 scsi_status)); 1131 scsi_status));
1122 1132
1123 /* Adjust queue depth for all luns on the port. */ 1133 /* Adjust queue depth for all luns on the port. */
1134 if (!ql2xqfulltracking)
1135 break;
1124 fcport->last_queue_full = jiffies; 1136 fcport->last_queue_full = jiffies;
1125 starget_for_each_device(cp->device->sdev_target, 1137 starget_for_each_device(cp->device->sdev_target,
1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1138 fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1145 if (!(scsi_status & SS_SENSE_LEN_VALID))
1134 break; 1146 break;
1135 1147
1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1148 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1137 break; 1149 break;
1138 1150
1139 case CS_DATA_UNDERRUN: 1151 case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1179 * Adjust queue depth for all luns on the 1191 * Adjust queue depth for all luns on the
1180 * port. 1192 * port.
1181 */ 1193 */
1194 if (!ql2xqfulltracking)
1195 break;
1182 fcport->last_queue_full = jiffies; 1196 fcport->last_queue_full = jiffies;
1183 starget_for_each_device( 1197 starget_for_each_device(
1184 cp->device->sdev_target, fcport, 1198 cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1206 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193 break; 1207 break;
1194 1208
1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1209 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196 } else { 1210 } else {
1197 /* 1211 /*
1198 * If RISC reports underrun and target does not report 1212 * If RISC reports underrun and target does not report
1199 * it then we must have a lost frame, so tell upper 1213 * it then we must have a lost frame, so tell upper
1200 * layer to retry it by reporting a bus busy. 1214 * layer to retry it by reporting an error.
1201 */ 1215 */
1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1216 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1217 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1207 cp->device->id, cp->device->lun, resid, 1221 cp->device->id, cp->device->lun, resid,
1208 scsi_bufflen(cp))); 1222 scsi_bufflen(cp)));
1209 1223
1210 cp->result = DID_BUS_BUSY << 16; 1224 cp->result = DID_ERROR << 16;
1211 break; 1225 break;
1212 } 1226 }
1213 1227
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1334 } 1348 }
1335 1349
1336 /* Place command on done queue. */ 1350 /* Place command on done queue. */
1337 if (vha->status_srb == NULL) 1351 if (rsp->status_srb == NULL)
1338 qla2x00_sp_compl(ha, sp); 1352 qla2x00_sp_compl(ha, sp);
1339} 1353}
1340 1354
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1346 * Extended sense data. 1360 * Extended sense data.
1347 */ 1361 */
1348static void 1362static void
1349qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1363qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1350{ 1364{
1351 uint8_t sense_sz = 0; 1365 uint8_t sense_sz = 0;
1352 struct qla_hw_data *ha = vha->hw; 1366 struct qla_hw_data *ha = rsp->hw;
1353 srb_t *sp = vha->status_srb; 1367 srb_t *sp = rsp->status_srb;
1354 struct scsi_cmnd *cp; 1368 struct scsi_cmnd *cp;
1355 1369
1356 if (sp != NULL && sp->request_sense_length != 0) { 1370 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1376 "cmd is NULL: already returned to OS (sp=%p)\n",
1363 sp); 1377 sp);
1364 1378
1365 vha->status_srb = NULL; 1379 rsp->status_srb = NULL;
1366 return; 1380 return;
1367 } 1381 }
1368 1382
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1383 1397
1384 /* Place command on done queue. */ 1398 /* Place command on done queue. */
1385 if (sp->request_sense_length == 0) { 1399 if (sp->request_sense_length == 0) {
1386 vha->status_srb = NULL; 1400 rsp->status_srb = NULL;
1387 qla2x00_sp_compl(ha, sp); 1401 qla2x00_sp_compl(ha, sp);
1388 } 1402 }
1389 } 1403 }
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1399{ 1413{
1400 srb_t *sp; 1414 srb_t *sp;
1401 struct qla_hw_data *ha = vha->hw; 1415 struct qla_hw_data *ha = vha->hw;
1402 struct req_que *req = rsp->req; 1416 uint32_t handle = LSW(pkt->handle);
1417 uint16_t que = MSW(pkt->handle);
1418 struct req_que *req = ha->req_q_map[que];
1403#if defined(QL_DEBUG_LEVEL_2) 1419#if defined(QL_DEBUG_LEVEL_2)
1404 if (pkt->entry_status & RF_INV_E_ORDER) 1420 if (pkt->entry_status & RF_INV_E_ORDER)
1405 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1421 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1417#endif 1433#endif
1418 1434
1419 /* Validate handle. */ 1435 /* Validate handle. */
1420 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1436 if (handle < MAX_OUTSTANDING_COMMANDS)
1421 sp = req->outstanding_cmds[pkt->handle]; 1437 sp = req->outstanding_cmds[handle];
1422 else 1438 else
1423 sp = NULL; 1439 sp = NULL;
1424 1440
1425 if (sp) { 1441 if (sp) {
1426 /* Free outstanding command slot. */ 1442 /* Free outstanding command slot. */
1427 req->outstanding_cmds[pkt->handle] = NULL; 1443 req->outstanding_cmds[handle] = NULL;
1428 1444
1429 /* Bad payload or header */ 1445 /* Bad payload or header */
1430 if (pkt->entry_status & 1446 if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1486 * qla24xx_process_response_queue() - Process response queue entries. 1502 * qla24xx_process_response_queue() - Process response queue entries.
1487 * @ha: SCSI driver HA context 1503 * @ha: SCSI driver HA context
1488 */ 1504 */
1489void 1505void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1490qla24xx_process_response_queue(struct rsp_que *rsp) 1506 struct rsp_que *rsp)
1491{ 1507{
1492 struct sts_entry_24xx *pkt; 1508 struct sts_entry_24xx *pkt;
1493 struct scsi_qla_host *vha;
1494
1495 vha = qla2x00_get_rsp_host(rsp);
1496 1509
1497 if (!vha->flags.online) 1510 if (!vha->flags.online)
1498 return; 1511 return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
1523 qla2x00_status_entry(vha, rsp, pkt); 1536 qla2x00_status_entry(vha, rsp, pkt);
1524 break; 1537 break;
1525 case STATUS_CONT_TYPE: 1538 case STATUS_CONT_TYPE:
1526 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 1539 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1527 break; 1540 break;
1528 case VP_RPT_ID_IOCB_TYPE: 1541 case VP_RPT_ID_IOCB_TYPE:
1529 qla24xx_report_id_acquisition(vha, 1542 qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1626 status = 0; 1639 status = 0;
1627 1640
1628 spin_lock(&ha->hardware_lock); 1641 spin_lock(&ha->hardware_lock);
1629 vha = qla2x00_get_rsp_host(rsp); 1642 vha = pci_get_drvdata(ha->pdev);
1630 for (iter = 50; iter--; ) { 1643 for (iter = 50; iter--; ) {
1631 stat = RD_REG_DWORD(&reg->host_status); 1644 stat = RD_REG_DWORD(&reg->host_status);
1632 if (stat & HSRX_RISC_PAUSED) { 1645 if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1664 break; 1677 break;
1665 case 0x13: 1678 case 0x13:
1666 case 0x14: 1679 case 0x14:
1667 qla24xx_process_response_queue(rsp); 1680 qla24xx_process_response_queue(vha, rsp);
1668 break; 1681 break;
1669 default: 1682 default:
1670 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1683 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1692 struct qla_hw_data *ha; 1705 struct qla_hw_data *ha;
1693 struct rsp_que *rsp; 1706 struct rsp_que *rsp;
1694 struct device_reg_24xx __iomem *reg; 1707 struct device_reg_24xx __iomem *reg;
1708 struct scsi_qla_host *vha;
1695 1709
1696 rsp = (struct rsp_que *) dev_id; 1710 rsp = (struct rsp_que *) dev_id;
1697 if (!rsp) { 1711 if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1704 1718
1705 spin_lock_irq(&ha->hardware_lock); 1719 spin_lock_irq(&ha->hardware_lock);
1706 1720
1707 qla24xx_process_response_queue(rsp); 1721 vha = qla25xx_get_host(rsp);
1722 qla24xx_process_response_queue(vha, rsp);
1708 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1723 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1709 1724
1710 spin_unlock_irq(&ha->hardware_lock); 1725 spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1717{ 1732{
1718 struct qla_hw_data *ha; 1733 struct qla_hw_data *ha;
1719 struct rsp_que *rsp; 1734 struct rsp_que *rsp;
1720 struct device_reg_24xx __iomem *reg;
1721 1735
1722 rsp = (struct rsp_que *) dev_id; 1736 rsp = (struct rsp_que *) dev_id;
1723 if (!rsp) { 1737 if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1726 return IRQ_NONE; 1740 return IRQ_NONE;
1727 } 1741 }
1728 ha = rsp->hw; 1742 ha = rsp->hw;
1729 reg = &ha->iobase->isp24;
1730 1743
1731 spin_lock_irq(&ha->hardware_lock); 1744 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1732
1733 qla24xx_process_response_queue(rsp);
1734
1735 spin_unlock_irq(&ha->hardware_lock);
1736 1745
1737 return IRQ_HANDLED; 1746 return IRQ_HANDLED;
1738} 1747}
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1760 status = 0; 1769 status = 0;
1761 1770
1762 spin_lock_irq(&ha->hardware_lock); 1771 spin_lock_irq(&ha->hardware_lock);
1763 vha = qla2x00_get_rsp_host(rsp); 1772 vha = pci_get_drvdata(ha->pdev);
1764 do { 1773 do {
1765 stat = RD_REG_DWORD(&reg->host_status); 1774 stat = RD_REG_DWORD(&reg->host_status);
1766 if (stat & HSRX_RISC_PAUSED) { 1775 if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1798 break; 1807 break;
1799 case 0x13: 1808 case 0x13:
1800 case 0x14: 1809 case 0x14:
1801 qla24xx_process_response_queue(rsp); 1810 qla24xx_process_response_queue(vha, rsp);
1802 break; 1811 break;
1803 default: 1812 default:
1804 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1813 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
1822/* Interrupt handling helpers. */ 1831/* Interrupt handling helpers. */
1823 1832
1824struct qla_init_msix_entry { 1833struct qla_init_msix_entry {
1825 uint16_t entry;
1826 uint16_t index;
1827 const char *name; 1834 const char *name;
1828 irq_handler_t handler; 1835 irq_handler_t handler;
1829}; 1836};
1830 1837
1831static struct qla_init_msix_entry base_queue = { 1838static struct qla_init_msix_entry msix_entries[3] = {
1832 .entry = 0, 1839 { "qla2xxx (default)", qla24xx_msix_default },
1833 .index = 0, 1840 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1834 .name = "qla2xxx (default)", 1841 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1835 .handler = qla24xx_msix_default,
1836};
1837
1838static struct qla_init_msix_entry base_rsp_queue = {
1839 .entry = 1,
1840 .index = 1,
1841 .name = "qla2xxx (rsp_q)",
1842 .handler = qla24xx_msix_rsp_q,
1843};
1844
1845static struct qla_init_msix_entry multi_rsp_queue = {
1846 .entry = 1,
1847 .index = 1,
1848 .name = "qla2xxx (multi_q)",
1849 .handler = qla25xx_msix_rsp_q,
1850}; 1842};
1851 1843
1852static void 1844static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1873 int i, ret; 1865 int i, ret;
1874 struct msix_entry *entries; 1866 struct msix_entry *entries;
1875 struct qla_msix_entry *qentry; 1867 struct qla_msix_entry *qentry;
1876 struct qla_init_msix_entry *msix_queue;
1877 1868
1878 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 1869 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1879 GFP_KERNEL); 1870 GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
1900 ha->msix_count, ret); 1891 ha->msix_count, ret);
1901 goto msix_out; 1892 goto msix_out;
1902 } 1893 }
1903 ha->max_queues = ha->msix_count - 1; 1894 ha->max_rsp_queues = ha->msix_count - 1;
1904 } 1895 }
1905 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 1896 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1906 ha->msix_count, GFP_KERNEL); 1897 ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
1918 qentry->rsp = NULL; 1909 qentry->rsp = NULL;
1919 } 1910 }
1920 1911
1921 /* Enable MSI-X for AENs for queue 0 */ 1912 /* Enable MSI-X vectors for the base queue */
1922 qentry = &ha->msix_entries[0]; 1913 for (i = 0; i < 2; i++) {
1923 ret = request_irq(qentry->vector, base_queue.handler, 0, 1914 qentry = &ha->msix_entries[i];
1924 base_queue.name, rsp); 1915 ret = request_irq(qentry->vector, msix_entries[i].handler,
1925 if (ret) { 1916 0, msix_entries[i].name, rsp);
1926 qla_printk(KERN_WARNING, ha, 1917 if (ret) {
1918 qla_printk(KERN_WARNING, ha,
1927 "MSI-X: Unable to register handler -- %x/%d.\n", 1919 "MSI-X: Unable to register handler -- %x/%d.\n",
1928 qentry->vector, ret); 1920 qentry->vector, ret);
1929 qla24xx_disable_msix(ha); 1921 qla24xx_disable_msix(ha);
1930 goto msix_out; 1922 ha->mqenable = 0;
1923 goto msix_out;
1924 }
1925 qentry->have_irq = 1;
1926 qentry->rsp = rsp;
1927 rsp->msix = qentry;
1931 } 1928 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934 1929
1935 /* Enable MSI-X vector for response queue update for queue 0 */ 1930 /* Enable MSI-X vector for response queue update for queue 0 */
1936 if (ha->max_queues > 1 && ha->mqiobase) { 1931 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1937 ha->mqenable = 1; 1932 ha->mqenable = 1;
1938 msix_queue = &multi_rsp_queue;
1939 qla_printk(KERN_INFO, ha,
1940 "MQ enabled, Number of Queue Resources: %d \n",
1941 ha->max_queues);
1942 } else {
1943 ha->mqenable = 0;
1944 msix_queue = &base_rsp_queue;
1945 }
1946
1947 qentry = &ha->msix_entries[1];
1948 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1949 msix_queue->name, rsp);
1950 if (ret) {
1951 qla_printk(KERN_WARNING, ha,
1952 "MSI-X: Unable to register handler -- %x/%d.\n",
1953 qentry->vector, ret);
1954 qla24xx_disable_msix(ha);
1955 ha->mqenable = 0;
1956 goto msix_out;
1957 }
1958 qentry->have_irq = 1;
1959 qentry->rsp = rsp;
1960 1933
1961msix_out: 1934msix_out:
1962 kfree(entries); 1935 kfree(entries);
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2063 } 2036 }
2064} 2037}
2065 2038
2066static struct scsi_qla_host *
2067qla2x00_get_rsp_host(struct rsp_que *rsp)
2068{
2069 srb_t *sp;
2070 struct qla_hw_data *ha = rsp->hw;
2071 struct scsi_qla_host *vha = NULL;
2072 struct sts_entry_24xx *pkt;
2073 struct req_que *req;
2074
2075 if (rsp->id) {
2076 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2077 req = rsp->req;
2078 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2079 sp = req->outstanding_cmds[pkt->handle];
2080 if (sp)
2081 vha = sp->fcport->vha;
2082 }
2083 }
2084 if (!vha)
2085 /* handle it in base queue */
2086 vha = pci_get_drvdata(ha->pdev);
2087
2088 return vha;
2089}
2090 2039
2091int qla25xx_request_irq(struct rsp_que *rsp) 2040int qla25xx_request_irq(struct rsp_que *rsp)
2092{ 2041{
2093 struct qla_hw_data *ha = rsp->hw; 2042 struct qla_hw_data *ha = rsp->hw;
2094 struct qla_init_msix_entry *intr = &multi_rsp_queue; 2043 struct qla_init_msix_entry *intr = &msix_entries[2];
2095 struct qla_msix_entry *msix = rsp->msix; 2044 struct qla_msix_entry *msix = rsp->msix;
2096 int ret; 2045 int ret;
2097 2046
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2106 msix->rsp = rsp; 2055 msix->rsp = rsp;
2107 return ret; 2056 return ret;
2108} 2057}
2058
2059struct scsi_qla_host *
2060qla25xx_get_host(struct rsp_que *rsp)
2061{
2062 srb_t *sp;
2063 struct qla_hw_data *ha = rsp->hw;
2064 struct scsi_qla_host *vha = NULL;
2065 struct sts_entry_24xx *pkt;
2066 struct req_que *req;
2067 uint16_t que;
2068 uint32_t handle;
2069
2070 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071 que = MSW(pkt->handle);
2072 handle = (uint32_t) LSW(pkt->handle);
2073 req = ha->req_q_map[que];
2074 if (handle < MAX_OUTSTANDING_COMMANDS) {
2075 sp = req->outstanding_cmds[handle];
2076 if (sp)
2077 return sp->fcport->vha;
2078 else
2079 goto base_que;
2080 }
2081base_que:
2082 vha = pci_get_drvdata(ha->pdev);
2083 return vha;
2084}
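
Note on the handle layout used above: the new qla25xx_get_host() recovers the owning scsi_qla_host from a response entry by splitting the 32-bit IOCB handle, with the originating request-queue id in the upper 16 bits (MSW) and the outstanding-command slot in the lower 16 bits (LSW); the MAKE_HANDLE() calls added in qla_mbx.c below rely on the same layout. A minimal stand-alone sketch of that packing follows; MAKE_HANDLE/MSW/LSW here are local stand-ins reconstructed from how the handle is decoded above, not copied from the driver's headers.

#include <stdint.h>
#include <stdio.h>

/* Illustration only -- not driver code. */
#define MAKE_HANDLE(q, h)  (((uint32_t)(uint16_t)(q) << 16) | (uint16_t)(h))
#define LSW(x)             ((uint16_t)(x))
#define MSW(x)             ((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
	uint32_t handle = MAKE_HANDLE(2, 49);	/* request queue 2, cmd slot 49 */

	printf("queue id : %u\n", (unsigned)MSW(handle));	/* -> 2  */
	printf("cmd slot : %u\n", (unsigned)LSW(handle));	/* -> 49 */
	return 0;
}
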
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf4..451ece0760b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
408 * Context: 408 * Context:
409 * Kernel context. 409 * Kernel context.
410 */ 410 */
411void 411int
412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, 413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
414 uint32_t *mpi_caps, uint8_t *phy) 414 uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
427 mcp->flags = 0; 427 mcp->flags = 0;
428 mcp->tov = MBX_TOV_SECONDS; 428 mcp->tov = MBX_TOV_SECONDS;
429 rval = qla2x00_mailbox_command(vha, mcp); 429 rval = qla2x00_mailbox_command(vha, mcp);
430 if (rval != QLA_SUCCESS)
431 goto failed;
430 432
431 /* Return mailbox data. */ 433 /* Return mailbox data. */
432 *major = mcp->mb[1]; 434 *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
446 phy[1] = mcp->mb[9] >> 8; 448 phy[1] = mcp->mb[9] >> 8;
447 phy[2] = mcp->mb[9] & 0xff; 449 phy[2] = mcp->mb[9] & 0xff;
448 } 450 }
449 451failed:
450 if (rval != QLA_SUCCESS) { 452 if (rval != QLA_SUCCESS) {
451 /*EMPTY*/ 453 /*EMPTY*/
452 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 454 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
455 /*EMPTY*/ 457 /*EMPTY*/
456 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 458 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
457 } 459 }
460 return rval;
458} 461}
459 462
460/* 463/*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 751 * Kernel context.
749 */ 752 */
750int 753int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 754qla2x00_abort_command(srb_t *sp)
752{ 755{
753 unsigned long flags = 0; 756 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 757 int rval;
756 uint32_t handle = 0; 758 uint32_t handle = 0;
757 mbx_cmd_t mc; 759 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 760 mbx_cmd_t *mcp = &mc;
761 fc_port_t *fcport = sp->fcport;
762 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = vha->req;
760 765
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 766 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 767
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 768 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 769 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 770 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 803}
801 804
802int 805int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 806qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 807{
805 int rval, rval2; 808 int rval, rval2;
806 mbx_cmd_t mc; 809 mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 816
814 l = l; 817 l = l;
815 vha = fcport->vha; 818 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 819 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 820 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 821 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 822 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 823 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 853}
851 854
852int 855int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 856qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 857{
855 int rval, rval2; 858 int rval, rval2;
856 mbx_cmd_t mc; 859 mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 865 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 866
864 vha = fcport->vha; 867 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 868 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 869 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 870 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 871 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 872 if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
931 mcp->mb[9] = vha->vp_idx; 934 mcp->mb[9] = vha->vp_idx;
932 mcp->out_mb = MBX_9|MBX_0; 935 mcp->out_mb = MBX_9|MBX_0;
933 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 936 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
937 if (IS_QLA81XX(vha->hw))
938 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
934 mcp->tov = MBX_TOV_SECONDS; 939 mcp->tov = MBX_TOV_SECONDS;
935 mcp->flags = 0; 940 mcp->flags = 0;
936 rval = qla2x00_mailbox_command(vha, mcp); 941 rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 957 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
953 vha->host_no, rval)); 958 vha->host_no, rval));
954 } else { 959 } else {
955 /*EMPTY*/
956 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 960 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
957 vha->host_no)); 961 vha->host_no));
962
963 if (IS_QLA81XX(vha->hw)) {
964 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
965 vha->fcoe_fcf_idx = mcp->mb[10];
966 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
967 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
968 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
969 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
970 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
971 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
972 }
958 } 973 }
959 974
960 return rval; 975 return rval;
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1252 1267
1253 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1254 mcp->out_mb = MBX_0; 1269 mcp->out_mb = MBX_0;
1255 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1270 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1256 mcp->tov = MBX_TOV_SECONDS; 1271 mcp->tov = MBX_TOV_SECONDS;
1257 mcp->flags = 0; 1272 mcp->flags = 0;
1258 rval = qla2x00_mailbox_command(vha, mcp); 1273 rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1261 states[0] = mcp->mb[1]; 1276 states[0] = mcp->mb[1];
1262 states[1] = mcp->mb[2]; 1277 states[1] = mcp->mb[2];
1263 states[2] = mcp->mb[3]; 1278 states[2] = mcp->mb[3];
1279 states[3] = mcp->mb[4];
1280 states[4] = mcp->mb[5];
1264 1281
1265 if (rval != QLA_SUCCESS) { 1282 if (rval != QLA_SUCCESS) {
1266 /*EMPTY*/ 1283 /*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1480 dma_addr_t lg_dma; 1497 dma_addr_t lg_dma;
1481 uint32_t iop[2]; 1498 uint32_t iop[2];
1482 struct qla_hw_data *ha = vha->hw; 1499 struct qla_hw_data *ha = vha->hw;
1500 struct req_que *req;
1501 struct rsp_que *rsp;
1483 1502
1484 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1503 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1485 1504
1505 if (ql2xmultique_tag)
1506 req = ha->req_q_map[0];
1507 else
1508 req = vha->req;
1509 rsp = req->rsp;
1510
1486 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1511 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1487 if (lg == NULL) { 1512 if (lg == NULL) {
1488 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1513 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1493 1518
1494 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1519 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1495 lg->entry_count = 1; 1520 lg->entry_count = 1;
1521 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1496 lg->nport_handle = cpu_to_le16(loop_id); 1522 lg->nport_handle = cpu_to_le16(loop_id);
1497 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1523 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1498 if (opt & BIT_0) 1524 if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1741 struct logio_entry_24xx *lg; 1767 struct logio_entry_24xx *lg;
1742 dma_addr_t lg_dma; 1768 dma_addr_t lg_dma;
1743 struct qla_hw_data *ha = vha->hw; 1769 struct qla_hw_data *ha = vha->hw;
1770 struct req_que *req;
1771 struct rsp_que *rsp;
1744 1772
1745 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1773 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1746 1774
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1752 } 1780 }
1753 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1781 memset(lg, 0, sizeof(struct logio_entry_24xx));
1754 1782
1783 if (ql2xmaxqueues > 1)
1784 req = ha->req_q_map[0];
1785 else
1786 req = vha->req;
1787 rsp = req->rsp;
1755 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1788 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1756 lg->entry_count = 1; 1789 lg->entry_count = 1;
1790 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1757 lg->nport_handle = cpu_to_le16(loop_id); 1791 lg->nport_handle = cpu_to_le16(loop_id);
1758 lg->control_flags = 1792 lg->control_flags =
1759 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1793 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1864 mbx_cmd_t mc; 1898 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc; 1899 mbx_cmd_t *mcp = &mc;
1866 1900
1867 if (IS_QLA81XX(vha->hw))
1868 return QLA_SUCCESS;
1869
1870 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1901 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1871 vha->host_no)); 1902 vha->host_no));
1872 1903
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2195} 2226}
2196 2227
2197int 2228int
2198qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2229qla24xx_abort_command(srb_t *sp)
2199{ 2230{
2200 int rval; 2231 int rval;
2201 fc_port_t *fcport;
2202 unsigned long flags = 0; 2232 unsigned long flags = 0;
2203 2233
2204 struct abort_entry_24xx *abt; 2234 struct abort_entry_24xx *abt;
2205 dma_addr_t abt_dma; 2235 dma_addr_t abt_dma;
2206 uint32_t handle; 2236 uint32_t handle;
2237 fc_port_t *fcport = sp->fcport;
2238 struct scsi_qla_host *vha = fcport->vha;
2207 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = vha->req;
2208 2241
2209 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2242 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2210 2243
2211 fcport = sp->fcport;
2212
2213 spin_lock_irqsave(&ha->hardware_lock, flags); 2244 spin_lock_irqsave(&ha->hardware_lock, flags);
2214 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2245 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2215 if (req->outstanding_cmds[handle] == sp) 2246 if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2231 2262
2232 abt->entry_type = ABORT_IOCB_TYPE; 2263 abt->entry_type = ABORT_IOCB_TYPE;
2233 abt->entry_count = 1; 2264 abt->entry_count = 1;
2265 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2234 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2266 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2235 abt->handle_to_abort = handle; 2267 abt->handle_to_abort = handle;
2236 abt->port_id[0] = fcport->d_id.b.al_pa; 2268 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
2272 2304
2273static int 2305static int
2274__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2306__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2275 unsigned int l) 2307 unsigned int l, int tag)
2276{ 2308{
2277 int rval, rval2; 2309 int rval, rval2;
2278 struct tsk_mgmt_cmd *tsk; 2310 struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2286 2318
2287 vha = fcport->vha; 2319 vha = fcport->vha;
2288 ha = vha->hw; 2320 ha = vha->hw;
2289 req = ha->req_q_map[0]; 2321 req = vha->req;
2290 rsp = ha->rsp_q_map[0]; 2322 if (ql2xmultique_tag)
2323 rsp = ha->rsp_q_map[tag + 1];
2324 else
2325 rsp = req->rsp;
2291 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2326 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2292 if (tsk == NULL) { 2327 if (tsk == NULL) {
2293 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2328 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2298 2333
2299 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2334 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2300 tsk->p.tsk.entry_count = 1; 2335 tsk->p.tsk.entry_count = 1;
2336 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2301 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2337 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2302 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2338 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2303 tsk->p.tsk.control_flags = cpu_to_le32(type); 2339 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2344} 2380}
2345 2381
2346int 2382int
2347qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2383qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2348{ 2384{
2349 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2385 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2350} 2386}
2351 2387
2352int 2388int
2353qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2389qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2354{ 2390{
2355 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2391 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2356} 2392}
2357 2393
2358int 2394int
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2446 if (rval != QLA_SUCCESS) { 2482 if (rval != QLA_SUCCESS) {
2447 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2483 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2448 vha->host_no, rval)); 2484 vha->host_no, rval));
2485 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2486 rval = QLA_INVALID_COMMAND;
2449 } else { 2487 } else {
2450 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2488 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2451 } 2489 }
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2717 if (vp_idx == 0) 2755 if (vp_idx == 0)
2718 return; 2756 return;
2719 2757
2720 if (MSB(stat) == 1) 2758 if (MSB(stat) == 1) {
2759 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
2760 "VP[%d].\n", vha->host_no, vp_idx));
2721 return; 2761 return;
2762 }
2722 2763
2723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2764 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
2724 if (vp_idx == vp->vp_idx) 2765 if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3141 WRT_REG_DWORD(&reg->req_q_in, 0); 3182 WRT_REG_DWORD(&reg->req_q_in, 0);
3142 WRT_REG_DWORD(&reg->req_q_out, 0); 3183 WRT_REG_DWORD(&reg->req_q_out, 0);
3143 } 3184 }
3185 req->req_q_in = &reg->req_q_in;
3186 req->req_q_out = &reg->req_q_out;
3144 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3187 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3145 3188
3146 rval = qla2x00_mailbox_command(vha, mcp); 3189 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3167 mcp->mb[6] = MSW(MSD(rsp->dma)); 3210 mcp->mb[6] = MSW(MSD(rsp->dma));
3168 mcp->mb[7] = LSW(MSD(rsp->dma)); 3211 mcp->mb[7] = LSW(MSD(rsp->dma));
3169 mcp->mb[5] = rsp->length; 3212 mcp->mb[5] = rsp->length;
3170 mcp->mb[11] = rsp->vp_idx;
3171 mcp->mb[14] = rsp->msix->entry; 3213 mcp->mb[14] = rsp->msix->entry;
3172 mcp->mb[13] = rsp->rid; 3214 mcp->mb[13] = rsp->rid;
3173 3215
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3179 mcp->mb[8] = 0; 3221 mcp->mb[8] = 0;
3180 /* que out ptr index */ 3222 /* que out ptr index */
3181 mcp->mb[9] = 0; 3223 mcp->mb[9] = 0;
3182 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3224 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3183 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3225 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3184 mcp->in_mb = MBX_0; 3226 mcp->in_mb = MBX_0;
3185 mcp->flags = MBX_DMA_OUT; 3227 mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3384 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3426 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
3385 vha->host_no, rval, mcp->mb[0])); 3427 vha->host_no, rval, mcp->mb[0]));
3386 } else { 3428 } else {
3387 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3429 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3388 } 3430 }
3389 3431
3390 return rval; 3432 return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3428 3470
3429 return rval; 3471 return rval;
3430} 3472}
3473
3474int
3475qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3476 uint16_t size_in_bytes, uint16_t *actual_size)
3477{
3478 int rval;
3479 mbx_cmd_t mc;
3480 mbx_cmd_t *mcp = &mc;
3481
3482 if (!IS_QLA81XX(vha->hw))
3483 return QLA_FUNCTION_FAILED;
3484
3485 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3486
3487 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3488 mcp->mb[2] = MSW(stats_dma);
3489 mcp->mb[3] = LSW(stats_dma);
3490 mcp->mb[6] = MSW(MSD(stats_dma));
3491 mcp->mb[7] = LSW(MSD(stats_dma));
3492 mcp->mb[8] = size_in_bytes >> 2;
3493 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3494 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3498
3499 if (rval != QLA_SUCCESS) {
3500 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3501 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3502 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3503 } else {
3504 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3505
3506 *actual_size = mcp->mb[2] << 2;
3507 }
3508
3509 return rval;
3510}
3511
3512int
3513qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3514 uint16_t size)
3515{
3516 int rval;
3517 mbx_cmd_t mc;
3518 mbx_cmd_t *mcp = &mc;
3519
3520 if (!IS_QLA81XX(vha->hw))
3521 return QLA_FUNCTION_FAILED;
3522
3523 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3524
3525 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3526 mcp->mb[1] = 0;
3527 mcp->mb[2] = MSW(tlv_dma);
3528 mcp->mb[3] = LSW(tlv_dma);
3529 mcp->mb[6] = MSW(MSD(tlv_dma));
3530 mcp->mb[7] = LSW(MSD(tlv_dma));
3531 mcp->mb[8] = size;
3532 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3533 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3534 mcp->tov = MBX_TOV_SECONDS;
3535 mcp->flags = 0;
3536 rval = qla2x00_mailbox_command(vha, mcp);
3537
3538 if (rval != QLA_SUCCESS) {
3539 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3540 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3541 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3542 } else {
3543 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3544 }
3545
3546 return rval;
3547}
3548
3549int
3550qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3551{
3552 int rval;
3553 mbx_cmd_t mc;
3554 mbx_cmd_t *mcp = &mc;
3555
3556 if (!IS_FWI2_CAPABLE(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3558
3559 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3560
3561 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3562 mcp->mb[1] = LSW(risc_addr);
3563 mcp->mb[8] = MSW(risc_addr);
3564 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3565 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3566 mcp->tov = 30;
3567 mcp->flags = 0;
3568 rval = qla2x00_mailbox_command(vha, mcp);
3569 if (rval != QLA_SUCCESS) {
3570 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3571 vha->host_no, rval, mcp->mb[0]));
3572 } else {
3573 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3574 *data = mcp->mb[3] << 16 | mcp->mb[2];
3575 }
3576
3577 return rval;
3578}
3579
3580int
3581qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3582{
3583 int rval;
3584 mbx_cmd_t mc;
3585 mbx_cmd_t *mcp = &mc;
3586
3587 if (!IS_FWI2_CAPABLE(vha->hw))
3588 return QLA_FUNCTION_FAILED;
3589
3590 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3591
3592 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3593 mcp->mb[1] = LSW(risc_addr);
3594 mcp->mb[2] = LSW(data);
3595 mcp->mb[3] = MSW(data);
3596 mcp->mb[8] = MSW(risc_addr);
3597 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3598 mcp->in_mb = MBX_0;
3599 mcp->tov = 30;
3600 mcp->flags = 0;
3601 rval = qla2x00_mailbox_command(vha, mcp);
3602 if (rval != QLA_SUCCESS) {
3603 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3604 vha->host_no, rval, mcp->mb[0]));
3605 } else {
3606 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3607 }
3608
3609 return rval;
3610}
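
Note on the RAM-word helpers added above: both follow the extended-mailbox convention of splitting the 32-bit RISC address across mb[1] (low word) and mb[8] (high word), with the 32-bit data carried in mb[2]/mb[3] and reassembled on the read side as mb[3] << 16 | mb[2]. A small self-contained sketch of that word packing; the mb[] array here is a plain local array, not the driver's mbx_cmd_t.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t mb[16] = { 0 };
	uint32_t risc_addr = 0x00123456;
	uint32_t data = 0xcafebabe;

	/* Write side: split both 32-bit values into 16-bit mailbox registers,
	 * mirroring qla2x00_write_ram_word() above. */
	mb[1] = (uint16_t)risc_addr;		/* LSW(risc_addr) -> mb[1] */
	mb[8] = (uint16_t)(risc_addr >> 16);	/* MSW(risc_addr) -> mb[8] */
	mb[2] = (uint16_t)data;			/* LSW(data)      -> mb[2] */
	mb[3] = (uint16_t)(data >> 16);		/* MSW(data)      -> mb[3] */

	/* Read side: reassemble as qla2x00_read_ram_word() does with
	 * "*data = mcp->mb[3] << 16 | mcp->mb[2]". */
	uint32_t addr_back = ((uint32_t)mb[8] << 16) | mb[1];
	uint32_t data_back = ((uint32_t)mb[3] << 16) | mb[2];

	assert(addr_back == risc_addr);
	assert(data_back == data);
	return 0;
}
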
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e300..650bcef08f2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
@@ -654,10 +633,19 @@ que_failed:
654 return 0; 633 return 0;
655} 634}
656 635
636static void qla_do_work(struct work_struct *work)
637{
638 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
639 struct scsi_qla_host *vha;
640
641 vha = qla25xx_get_host(rsp);
642 qla24xx_process_response_queue(vha, rsp);
643}
644
657/* create response queue */ 645/* create response queue */
658int 646int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 647qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 648 uint8_t vp_idx, uint16_t rid, int req)
661{ 649{
662 int ret = 0; 650 int ret = 0;
663 struct rsp_que *rsp = NULL; 651 struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 660 goto que_failed;
673 } 661 }
674 662
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 663 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 664 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 665 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 666 &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 671 }
684 672
685 mutex_lock(&ha->vport_lock); 673 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 674 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 675 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 676 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 677 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 678 "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 696 if (LSB(rsp->rid))
709 options |= BIT_5; 697 options |= BIT_5;
710 rsp->options = options; 698 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 699 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 700 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 701 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 714 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 715 goto que_failed;
730 } 716 }
717 if (req >= 0)
718 rsp->req = ha->req_q_map[req];
719 else
720 rsp->req = NULL;
731 721
732 qla2x00_init_response_q_entries(rsp); 722 qla2x00_init_response_q_entries(rsp);
733 723 if (rsp->hw->wq)
724 INIT_WORK(&rsp->q_work, qla_do_work);
734 return rsp->id; 725 return rsp->id;
735 726
736que_failed: 727que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 735 uint16_t options = 0;
745 uint8_t ret = 0; 736 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 737 struct qla_hw_data *ha = vha->hw;
738 struct rsp_que *rsp;
747 739
748 options |= BIT_1; 740 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 741 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 742 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 743 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 744 return ret;
753 } else 745 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 746 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
747 rsp = ha->rsp_q_map[ret];
755 748
756 options = 0; 749 options = 0;
757 if (qos & BIT_7) 750 if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 752 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 753 qos & ~BIT_7);
761 if (ret) { 754 if (ret) {
762 vha->req_ques[0] = ret; 755 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 756 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 757 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 758 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
759 rsp->req = ha->req_q_map[ret];
766 760
767 return ret; 761 return ret;
768} 762}
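
Note on the deferral pattern above: qla_do_work() together with the reworked qla25xx_msix_rsp_q() in qla_isr.c is what the new CPU-affinity mode (ql2xmultique_tag) is built on -- each added response queue carries a work item, and the MSI-X handler hands ring processing to the shared workqueue on CPU (rsp->id - 1) via queue_work_on(). A condensed kernel-style sketch of that pattern follows; the my_* names are hypothetical, and only the workqueue/IRQ calls themselves (INIT_WORK, queue_work_on, create_workqueue, request_irq return conventions) are real kernel APIs.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Hypothetical, trimmed-down response queue -- not the driver's rsp_que. */
struct my_rsp {
	int id;				/* 1-based queue id             */
	struct workqueue_struct *wq;	/* shared wq, e.g. "qla2xxx_wq" */
	struct work_struct q_work;
};

static void my_do_work(struct work_struct *work)
{
	struct my_rsp *rsp = container_of(work, struct my_rsp, q_work);

	/* Walk this queue's response ring in process context. */
	(void)rsp;
}

static void my_rsp_setup(struct my_rsp *rsp, struct workqueue_struct *wq, int id)
{
	rsp->id = id;
	rsp->wq = wq;			/* e.g. from create_workqueue("qla2xxx_wq") */
	INIT_WORK(&rsp->q_work, my_do_work);
}

static irqreturn_t my_msix_rsp_q(int irq, void *dev_id)
{
	struct my_rsp *rsp = dev_id;

	/* Defer ring processing to CPU (id - 1), as the new handler does. */
	queue_work_on(rsp->id - 1, rsp->wq, &rsp->q_work);
	return IRQ_HANDLED;
}
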
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d..dcf011679c8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 77MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 78 "Maximum queue depth to report for target devices.");
79 79
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
80int ql2xqfullrampup = 120; 88int ql2xqfullrampup = 120;
81module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfullrampup, 90MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
96 "Enables MQ settings " 104 "Enables MQ settings "
97 "Default is 1 for single queue. Set it to number \ 105 "Default is 1 for single queue. Set it to number \
98 of queues in MQ mode."); 106 of queues in MQ mode.");
107
108int ql2xmultique_tag;
109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xmultique_tag,
111 "Enables CPU affinity settings for the driver "
112 "Default is 0 for no affinity of request and response IO. "
113 "Set it to 1 to turn on the cpu affinity.");
114
115int ql2xfwloadbin;
116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
117MODULE_PARM_DESC(ql2xfwloadbin,
118 "Option to specify location from which to load ISP firmware:\n"
119 " 2 -- load firmware via the request_firmware() (hotplug)\n"
120 " interface.\n"
121 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n");
123
99/* 124/*
100 * SCSI host template entry points 125 * SCSI host template entry points
101 */ 126 */
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 212/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 214{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 215 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 216 GFP_KERNEL);
192 if (!ha->req_q_map) { 217 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 218 qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 220 goto fail_req_map;
196 } 221 }
197 222
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 223 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 224 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 225 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 226 qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
213 return -ENOMEM; 238 return -ENOMEM;
214} 239}
215 240
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 242{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 243 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 244 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 245 (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 249 req = NULL;
233} 250}
234 251
252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
253{
254 if (rsp && rsp->ring)
255 dma_free_coherent(&ha->pdev->dev,
256 (rsp->length + 1) * sizeof(response_t),
257 rsp->ring, rsp->dma);
258
259 kfree(rsp);
260 rsp = NULL;
261}
262
235static void qla2x00_free_queues(struct qla_hw_data *ha) 263static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 264{
237 struct req_que *req; 265 struct req_que *req;
238 struct rsp_que *rsp; 266 struct rsp_que *rsp;
239 int cnt; 267 int cnt;
240 268
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 269 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 270 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 271 qla2x00_free_req_que(ha, req);
272 }
273 kfree(ha->req_q_map);
274 ha->req_q_map = NULL;
275
276 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
277 rsp = ha->rsp_q_map[cnt];
278 qla2x00_free_rsp_que(ha, rsp);
245 } 279 }
246 kfree(ha->rsp_q_map); 280 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL; 281 ha->rsp_q_map = NULL;
282}
248 283
249 kfree(ha->req_q_map); 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
250 ha->req_q_map = NULL; 285{
286 uint16_t options = 0;
287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw;
289
290 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */
294 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
296 QLA_DEFAULT_QUE_QOS);
297 if (!req) {
298 qla_printk(KERN_WARNING, ha,
299 "Can't create request queue\n");
300 goto fail;
301 }
302 vha->req = ha->req_q_map[req];
303 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
305 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
306 if (!ret) {
307 qla_printk(KERN_WARNING, ha,
308 "Response Queue create failed\n");
309 goto fail2;
310 }
311 }
312 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n",
315 ha->max_rsp_queues, ha->max_req_queues));
316 }
317 return 0;
318fail2:
319 qla25xx_delete_queues(vha);
320fail:
321 ha->mqenable = 0;
322 return 1;
251} 323}
252 324
253static char * 325static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
387 459
388 sp->fcport = fcport; 460 sp->fcport = fcport;
389 sp->cmd = cmd; 461 sp->cmd = cmd;
390 sp->que = ha->req_q_map[0];
391 sp->flags = 0; 462 sp->flags = 0;
392 CMD_SP(cmd) = (void *)sp; 463 CMD_SP(cmd) = (void *)sp;
393 cmd->scsi_done = done; 464 cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 683void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 684qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 685{
615 int cnt, que, id; 686 int cnt;
616 unsigned long flags; 687 unsigned long flags;
617 srb_t *sp; 688 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 689 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 691 struct req_que *req;
621 692
622 spin_lock_irqsave(&ha->hardware_lock, flags); 693 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 694 req = vha->req;
624 id = vha->req_ques[que]; 695 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 696 sp = req->outstanding_cmds[cnt];
626 if (!req) 697 if (!sp)
698 continue;
699 if (sp->fcport != fcport)
627 continue; 700 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 701
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 703 if (ha->isp_ops->abort_command(sp)) {
704 DEBUG2(qla_printk(KERN_WARNING, ha,
705 "Abort failed -- %lx\n",
706 sp->cmd->serial_number));
707 } else {
708 if (qla2x00_eh_wait_on_command(sp->cmd) !=
709 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 710 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 711 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 712 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 713 }
714 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 715 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 717}
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
693 unsigned long flags; 759 unsigned long flags;
694 int wait = 0; 760 int wait = 0;
695 struct qla_hw_data *ha = vha->hw; 761 struct qla_hw_data *ha = vha->hw;
696 struct req_que *req; 762 struct req_que *req = vha->req;
697 srb_t *spt; 763 srb_t *spt;
698 764
699 qla2x00_block_error_handler(cmd); 765 qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 spt = (srb_t *) CMD_SP(cmd); 775 spt = (srb_t *) CMD_SP(cmd);
710 if (!spt) 776 if (!spt)
711 return SUCCESS; 777 return SUCCESS;
712 req = spt->que;
713 778
714 /* Check active list for command command. */ 779 /* Check active list for command command. */
715 spin_lock_irqsave(&ha->hardware_lock, flags); 780 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 791 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 792
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 794 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 795 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 796 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 797 ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
777 return status; 842 return status;
778 843
779 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
780 req = sp->que; 845 req = vha->req;
781 for (cnt = 1; status == QLA_SUCCESS && 846 for (cnt = 1; status == QLA_SUCCESS &&
782 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 847 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
783 sp = req->outstanding_cmds[cnt]; 848 sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
820 885
821static int 886static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 887__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 888 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 889{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 890 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 891 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 906 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 907 goto eh_reset_failed;
843 err = 2; 908 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 909 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
910 != QLA_SUCCESS)
845 goto eh_reset_failed; 911 goto eh_reset_failed;
846 err = 3; 912 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 913 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
996 if (qla2x00_vp_abort_isp(vha)) 1062 if (qla2x00_vp_abort_isp(vha))
997 goto eh_host_reset_lock; 1063 goto eh_host_reset_lock;
998 } else { 1064 } else {
1065 if (ha->wq)
1066 flush_workqueue(ha->wq);
1067
999 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1068 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1000 if (qla2x00_abort_isp(base_vha)) { 1069 if (qla2x00_abort_isp(base_vha)) {
1001 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1070 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1037 struct fc_port *fcport; 1106 struct fc_port *fcport;
1038 struct qla_hw_data *ha = vha->hw; 1107 struct qla_hw_data *ha = vha->hw;
1039 1108
1040 if (ha->flags.enable_lip_full_login && !vha->vp_idx) { 1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
1110 !IS_QLA81XX(ha)) {
1041 ret = qla2x00_full_login_lip(vha); 1111 ret = qla2x00_full_login_lip(vha);
1042 if (ret != QLA_SUCCESS) { 1112 if (ret != QLA_SUCCESS) {
1043 DEBUG2_3(printk("%s(%ld): failed: " 1113 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1064 if (fcport->port_type != FCT_TARGET) 1134 if (fcport->port_type != FCT_TARGET)
1065 continue; 1135 continue;
1066 1136
1067 ret = ha->isp_ops->target_reset(fcport, 0); 1137 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1068 if (ret != QLA_SUCCESS) { 1138 if (ret != QLA_SUCCESS) {
1069 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1139 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1070 "target_reset=%d d_id=%x.\n", __func__, 1140 "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1088 struct req_que *req; 1158 struct req_que *req;
1089 1159
1090 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1091 for (que = 0; que < ha->max_queues; que++) { 1161 for (que = 0; que < ha->max_req_queues; que++) {
1092 req = ha->req_q_map[que]; 1162 req = ha->req_q_map[que];
1093 if (!req) 1163 if (!req)
1094 continue; 1164 continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1123 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1124 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1125 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1126 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1196 struct req_que *req = vha->req;
1127 1197
1128 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1129 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1511 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1581 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1512 break; 1582 break;
1513 } 1583 }
1584
1585 /* Get adapter physical port no from interrupt pin register. */
1586 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1587 if (ha->port_no & 1)
1588 ha->flags.port0 = 1;
1589 else
1590 ha->flags.port0 = 0;
1514} 1591}
1515 1592
1516static int 1593static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1518{ 1595{
1519 resource_size_t pio; 1596 resource_size_t pio;
1520 uint16_t msix; 1597 uint16_t msix;
1598 int cpus;
1521 1599
1522 if (pci_request_selected_regions(ha->pdev, ha->bars, 1600 if (pci_request_selected_regions(ha->pdev, ha->bars,
1523 QLA2XXX_DRIVER_NAME)) { 1601 QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
1571 } 1649 }
1572 1650
1573 /* Determine queue resources */ 1651 /* Determine queue resources */
1574 ha->max_queues = 1; 1652 ha->max_req_queues = ha->max_rsp_queues = 1;
1575 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1653 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1654 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1576 goto mqiobase_exit; 1655 goto mqiobase_exit;
1577 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1656 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1578 pci_resource_len(ha->pdev, 3)); 1657 pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
1582 ha->msix_count = msix; 1661 ha->msix_count = msix;
1583 /* Max queues are bounded by available msix vectors */ 1662 /* Max queues are bounded by available msix vectors */
1584 /* queue 0 uses two msix vectors */ 1663 /* queue 0 uses two msix vectors */
1585 if (ha->msix_count - 1 < ql2xmaxqueues) 1664 if (ql2xmultique_tag) {
1586 ha->max_queues = ha->msix_count - 1; 1665 cpus = num_online_cpus();
1587 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1666 ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
1588 ha->max_queues = QLA_MQ_SIZE; 1667 (cpus + 1) : (ha->msix_count - 1);
1589 else 1668 ha->max_req_queues = 2;
1590 ha->max_queues = ql2xmaxqueues; 1669 } else if (ql2xmaxqueues > 1) {
1670 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1671 QLA_MQ_SIZE : ql2xmaxqueues;
1672 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1673 " of request queues:%d\n", ha->max_req_queues));
1674 }
1591 qla_printk(KERN_INFO, ha, 1675 qla_printk(KERN_INFO, ha,
1592 "MSI-X vector count: %d\n", msix); 1676 "MSI-X vector count: %d\n", msix);
1593 } 1677 } else
1678 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1594 1679
1595mqiobase_exit: 1680mqiobase_exit:
1596 ha->msix_count = ha->max_queues + 1; 1681 ha->msix_count = ha->max_rsp_queues + 1;
1597 return (0); 1682 return (0);
1598 1683
1599iospace_error_exit: 1684iospace_error_exit:
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1605{ 1690{
1606 scsi_qla_host_t *vha = shost_priv(shost); 1691 scsi_qla_host_t *vha = shost_priv(shost);
1607 1692
1693 if (vha->hw->flags.running_gold_fw)
1694 return;
1695
1608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1696 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1697 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1610 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1698 set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1768 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 1856 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1769 ha->gid_list_info_size = 8; 1857 ha->gid_list_info_size = 8;
1770 ha->optrom_size = OPTROM_SIZE_81XX; 1858 ha->optrom_size = OPTROM_SIZE_81XX;
1859 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1771 ha->isp_ops = &qla81xx_isp_ops; 1860 ha->isp_ops = &qla81xx_isp_ops;
1772 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 1861 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1773 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 1862 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1803 1892
1804 ret = -ENOMEM; 1893 ret = -ENOMEM;
1805 qla2x00_mem_free(ha); 1894 qla2x00_mem_free(ha);
1806 qla2x00_free_que(ha, req, rsp); 1895 qla2x00_free_req_que(ha, req);
1896 qla2x00_free_rsp_que(ha, rsp);
1807 goto probe_hw_failed; 1897 goto probe_hw_failed;
1808 } 1898 }
1809 1899
1810 pci_set_drvdata(pdev, base_vha); 1900 pci_set_drvdata(pdev, base_vha);
1811 1901
1812 host = base_vha->host; 1902 host = base_vha->host;
1813 base_vha->req_ques[0] = req->id; 1903 base_vha->req = req;
1814 host->can_queue = req->length + 128; 1904 host->can_queue = req->length + 128;
1815 if (IS_QLA2XXX_MIDTYPE(ha)) 1905 if (IS_QLA2XXX_MIDTYPE(ha))
1816 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1906 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1841 } 1931 }
1842 ha->rsp_q_map[0] = rsp; 1932 ha->rsp_q_map[0] = rsp;
1843 ha->req_q_map[0] = req; 1933 ha->req_q_map[0] = req;
1844 1934 rsp->req = req;
1935 req->rsp = rsp;
1936 set_bit(0, ha->req_qid_map);
1937 set_bit(0, ha->rsp_qid_map);
1845 /* FWI2-capable only. */ 1938 /* FWI2-capable only. */
1846 req->req_q_in = &ha->iobase->isp24.req_q_in; 1939 req->req_q_in = &ha->iobase->isp24.req_q_in;
1847 req->req_q_out = &ha->iobase->isp24.req_q_out; 1940 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1866 goto probe_failed; 1959 goto probe_failed;
1867 } 1960 }
1868 1961
1962 if (ha->mqenable)
1963 if (qla25xx_setup_mode(base_vha))
1964 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single"
1966 " queue mode\n");
1967
1968 if (ha->flags.running_gold_fw)
1969 goto skip_dpc;
1970
1869 /* 1971 /*
1870 * Startup the kernel thread for this host adapter 1972 * Startup the kernel thread for this host adapter
1871 */ 1973 */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1878 goto probe_failed; 1980 goto probe_failed;
1879 } 1981 }
1880 1982
1983skip_dpc:
1881 list_add_tail(&base_vha->list, &ha->vp_list); 1984 list_add_tail(&base_vha->list, &ha->vp_list);
1882 base_vha->host->irq = ha->pdev->irq; 1985 base_vha->host->irq = ha->pdev->irq;
1883 1986
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 return 0; 2020 return 0;
1918 2021
1919probe_init_failed: 2022probe_init_failed:
1920 qla2x00_free_que(ha, req, rsp); 2023 qla2x00_free_req_que(ha, req);
1921 ha->max_queues = 0; 2024 qla2x00_free_rsp_que(ha, rsp);
2025 ha->max_req_queues = ha->max_rsp_queues = 0;
1922 2026
1923probe_failed: 2027probe_failed:
1924 if (base_vha->timer_active) 2028 if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1976 2080
1977 base_vha->flags.online = 0; 2081 base_vha->flags.online = 0;
1978 2082
2083 /* Flush the work queue and remove it */
2084 if (ha->wq) {
2085 flush_workqueue(ha->wq);
2086 destroy_workqueue(ha->wq);
2087 ha->wq = NULL;
2088 }
2089
1979 /* Kill the kernel thread for this host */ 2090 /* Kill the kernel thread for this host */
1980 if (ha->dpc_thread) { 2091 if (ha->dpc_thread) {
1981 struct task_struct *t = ha->dpc_thread; 2092 struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2017{ 2128{
2018 struct qla_hw_data *ha = vha->hw; 2129 struct qla_hw_data *ha = vha->hw;
2019 2130
2131 qla25xx_delete_queues(vha);
2132
2020 if (ha->flags.fce_enabled) 2133 if (ha->flags.fce_enabled)
2021 qla2x00_disable_fce_trace(vha, NULL, NULL); 2134 qla2x00_disable_fce_trace(vha, NULL, NULL);
2022 2135
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2329 vfree(ha->fw_dump); 2442 vfree(ha->fw_dump);
2330 } 2443 }
2331 2444
2445 if (ha->dcbx_tlv)
2446 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2447 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2448
2449 if (ha->xgmac_data)
2450 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2451 ha->xgmac_data, ha->xgmac_data_dma);
2452
2332 if (ha->sns_cmd) 2453 if (ha->sns_cmd)
2333 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2454 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2334 ha->sns_cmd, ha->sns_cmd_dma); 2455 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2412 INIT_LIST_HEAD(&vha->work_list); 2533 INIT_LIST_HEAD(&vha->work_list);
2413 INIT_LIST_HEAD(&vha->list); 2534 INIT_LIST_HEAD(&vha->list);
2414 2535
2536 spin_lock_init(&vha->work_lock);
2537
2415 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 2538 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2416 return vha; 2539 return vha;
2417 2540
@@ -2420,13 +2543,11 @@ fail:
2420} 2543}
2421 2544
2422static struct qla_work_evt * 2545static struct qla_work_evt *
2423qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, 2546qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2424 int locked)
2425{ 2547{
2426 struct qla_work_evt *e; 2548 struct qla_work_evt *e;
2427 2549
2428 e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: 2550 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2429 GFP_KERNEL);
2430 if (!e) 2551 if (!e)
2431 return NULL; 2552 return NULL;
2432 2553
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2437} 2558}
2438 2559
2439static int 2560static int
2440qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) 2561qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2441{ 2562{
2442 unsigned long uninitialized_var(flags); 2563 unsigned long flags;
2443 struct qla_hw_data *ha = vha->hw;
2444 2564
2445 if (!locked) 2565 spin_lock_irqsave(&vha->work_lock, flags);
2446 spin_lock_irqsave(&ha->hardware_lock, flags);
2447 list_add_tail(&e->list, &vha->work_list); 2566 list_add_tail(&e->list, &vha->work_list);
2567 spin_unlock_irqrestore(&vha->work_lock, flags);
2448 qla2xxx_wake_dpc(vha); 2568 qla2xxx_wake_dpc(vha);
2449 if (!locked) 2569
2450 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451 return QLA_SUCCESS; 2570 return QLA_SUCCESS;
2452} 2571}
2453 2572
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2457{ 2576{
2458 struct qla_work_evt *e; 2577 struct qla_work_evt *e;
2459 2578
2460 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); 2579 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2461 if (!e) 2580 if (!e)
2462 return QLA_FUNCTION_FAILED; 2581 return QLA_FUNCTION_FAILED;
2463 2582
2464 e->u.aen.code = code; 2583 e->u.aen.code = code;
2465 e->u.aen.data = data; 2584 e->u.aen.data = data;
2466 return qla2x00_post_work(vha, e, 1); 2585 return qla2x00_post_work(vha, e);
2467} 2586}
2468 2587
2469int 2588int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2471{ 2590{
2472 struct qla_work_evt *e; 2591 struct qla_work_evt *e;
2473 2592
2474 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); 2593 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2475 if (!e) 2594 if (!e)
2476 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2477 2596
2478 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 2597 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2479 return qla2x00_post_work(vha, e, 1); 2598 return qla2x00_post_work(vha, e);
2480} 2599}
2481 2600
2482static void 2601static void
2483qla2x00_do_work(struct scsi_qla_host *vha) 2602qla2x00_do_work(struct scsi_qla_host *vha)
2484{ 2603{
2485 struct qla_work_evt *e; 2604 struct qla_work_evt *e, *tmp;
2486 struct qla_hw_data *ha = vha->hw; 2605 unsigned long flags;
2606 LIST_HEAD(work);
2487 2607
2488 spin_lock_irq(&ha->hardware_lock); 2608 spin_lock_irqsave(&vha->work_lock, flags);
2489 while (!list_empty(&vha->work_list)) { 2609 list_splice_init(&vha->work_list, &work);
2490 e = list_entry(vha->work_list.next, struct qla_work_evt, list); 2610 spin_unlock_irqrestore(&vha->work_lock, flags);
2611
2612 list_for_each_entry_safe(e, tmp, &work, list) {
2491 list_del_init(&e->list); 2613 list_del_init(&e->list);
2492 spin_unlock_irq(&ha->hardware_lock);
2493 2614
2494 switch (e->type) { 2615 switch (e->type) {
2495 case QLA_EVT_AEN: 2616 case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2502 } 2623 }
2503 if (e->flags & QLA_EVT_FLAG_FREE) 2624 if (e->flags & QLA_EVT_FLAG_FREE)
2504 kfree(e); 2625 kfree(e);
2505 spin_lock_irq(&ha->hardware_lock);
2506 } 2626 }
2507 spin_unlock_irq(&ha->hardware_lock);
2508} 2627}
2628
2509/* Relogins all the fcports of a vport 2629/* Relogins all the fcports of a vport
2510 * Context: dpc thread 2630 * Context: dpc thread
2511 */ 2631 */
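
The qla_os.c hunks above replace the hardware_lock-protected work list with a dedicated vha->work_lock and drain the list by splicing it onto a stack-local list before processing. A minimal sketch of that drain pattern, using a hypothetical struct my_evt in place of the driver's qla_work_evt:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical event type standing in for qla_work_evt. */
struct my_evt {
	struct list_head list;
	int type;
};

static void drain_work(struct list_head *work_list, spinlock_t *work_lock)
{
	struct my_evt *e, *tmp;
	unsigned long flags;
	LIST_HEAD(work);

	/* Move all pending entries to a private list; the lock is held
	 * only for the splice, never across the handlers. */
	spin_lock_irqsave(work_lock, flags);
	list_splice_init(work_list, &work);
	spin_unlock_irqrestore(work_lock, flags);

	/* Safe iteration: entries may be freed as they are handled. */
	list_for_each_entry_safe(e, tmp, &work, list) {
		list_del_init(&e->list);
		/* dispatch on e->type here */
		kfree(e);
	}
}
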
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd..6260505dceb 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
219 wait_cnt = NVR_WAIT_CNT; 219 wait_cnt = NVR_WAIT_CNT;
220 do { 220 do {
221 if (!--wait_cnt) { 221 if (!--wait_cnt) {
222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(qla_printk(KERN_WARNING, ha,
223 __func__, vha->host_no)); 223 "NVRAM didn't go ready...\n"));
224 break; 224 break;
225 } 225 }
226 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
349 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
350 do { 350 do {
351 if (!--wait_cnt) { 351 if (!--wait_cnt) {
352 DEBUG9_10(qla_printk( 352 DEBUG9_10(qla_printk(KERN_WARNING, ha,
353 "NVRAM didn't go ready...\n")); 353 "NVRAM didn't go ready...\n"));
354 break; 354 break;
355 } 355 }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
408 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
409 do { 409 do {
410 if (!--wait_cnt) { 410 if (!--wait_cnt) {
411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); 411 DEBUG9_10(qla_printk(KERN_WARNING, ha,
412 "NVRAM didn't go ready...\n"));
412 break; 413 break;
413 } 414 }
414 NVRAM_DELAY(); 415 NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
701 break; 702 break;
702 case FLT_REG_VPD_0: 703 case FLT_REG_VPD_0:
703 ha->flt_region_vpd_nvram = start; 704 ha->flt_region_vpd_nvram = start;
704 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 705 if (ha->flags.port0)
705 ha->flt_region_vpd = start; 706 ha->flt_region_vpd = start;
706 break; 707 break;
707 case FLT_REG_VPD_1: 708 case FLT_REG_VPD_1:
708 if (PCI_FUNC(ha->pdev->devfn) & 1) 709 if (!ha->flags.port0)
709 ha->flt_region_vpd = start; 710 ha->flt_region_vpd = start;
710 break; 711 break;
711 case FLT_REG_NVRAM_0: 712 case FLT_REG_NVRAM_0:
712 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 713 if (ha->flags.port0)
713 ha->flt_region_nvram = start; 714 ha->flt_region_nvram = start;
714 break; 715 break;
715 case FLT_REG_NVRAM_1: 716 case FLT_REG_NVRAM_1:
716 if (PCI_FUNC(ha->pdev->devfn) & 1) 717 if (!ha->flags.port0)
717 ha->flt_region_nvram = start; 718 ha->flt_region_nvram = start;
718 break; 719 break;
719 case FLT_REG_FDT: 720 case FLT_REG_FDT:
720 ha->flt_region_fdt = start; 721 ha->flt_region_fdt = start;
721 break; 722 break;
722 case FLT_REG_NPIV_CONF_0: 723 case FLT_REG_NPIV_CONF_0:
723 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 724 if (ha->flags.port0)
724 ha->flt_region_npiv_conf = start; 725 ha->flt_region_npiv_conf = start;
725 break; 726 break;
726 case FLT_REG_NPIV_CONF_1: 727 case FLT_REG_NPIV_CONF_1:
727 if (PCI_FUNC(ha->pdev->devfn) & 1) 728 if (!ha->flags.port0)
728 ha->flt_region_npiv_conf = start; 729 ha->flt_region_npiv_conf = start;
729 break; 730 break;
731 case FLT_REG_GOLD_FW:
732 ha->flt_region_gold_fw = start;
733 break;
730 } 734 }
731 } 735 }
732 goto done; 736 goto done;
@@ -744,12 +748,12 @@ no_flash_data:
744 ha->flt_region_fw = def_fw[def]; 748 ha->flt_region_fw = def_fw[def];
745 ha->flt_region_boot = def_boot[def]; 749 ha->flt_region_boot = def_boot[def];
746 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 750 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
747 ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 751 ha->flt_region_vpd = ha->flags.port0 ?
748 def_vpd0[def]: def_vpd1[def]; 752 def_vpd0[def]: def_vpd1[def];
749 ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 753 ha->flt_region_nvram = ha->flags.port0 ?
750 def_nvram0[def]: def_nvram1[def]; 754 def_nvram0[def]: def_nvram1[def];
751 ha->flt_region_fdt = def_fdt[def]; 755 ha->flt_region_fdt = def_fdt[def];
752 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 756 ha->flt_region_npiv_conf = ha->flags.port0 ?
753 def_npiv_conf0[def]: def_npiv_conf1[def]; 757 def_npiv_conf0[def]: def_npiv_conf1[def];
754done: 758done:
755 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 759 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
924 struct fc_vport_identifiers vid; 928 struct fc_vport_identifiers vid;
925 struct fc_vport *vport; 929 struct fc_vport *vport;
926 930
931 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
932
927 flags = le16_to_cpu(entry->flags); 933 flags = le16_to_cpu(entry->flags);
928 if (flags == 0xffff) 934 if (flags == 0xffff)
929 continue; 935 continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
937 vid.port_name = wwn_to_u64(entry->port_name); 943 vid.port_name = wwn_to_u64(entry->port_name);
938 vid.node_name = wwn_to_u64(entry->node_name); 944 vid.node_name = wwn_to_u64(entry->node_name);
939 945
940 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
941
942 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
943 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
944 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
945 entry->q_qos, entry->f_qos)); 949 entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
955 } 959 }
956done: 960done:
957 kfree(data); 961 kfree(data);
958 ha->npiv_info = NULL;
959} 962}
960 963
961static int 964static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1079 0xff0000) | ((fdata >> 16) & 0xff)); 1082 0xff0000) | ((fdata >> 16) & 0xff));
1080 ret = qla24xx_erase_sector(vha, fdata); 1083 ret = qla24xx_erase_sector(vha, fdata);
1081 if (ret != QLA_SUCCESS) { 1084 if (ret != QLA_SUCCESS) {
1082 DEBUG9(qla_printk("Unable to erase sector: " 1085 DEBUG9(qla_printk(KERN_WARNING, ha,
1083 "address=%x.\n", faddr)); 1086 "Unable to erase sector: address=%x.\n",
1087 faddr));
1084 break; 1088 break;
1085 } 1089 }
1086 } 1090 }
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1240 ret = qla24xx_write_flash_dword(ha, 1244 ret = qla24xx_write_flash_dword(ha,
1241 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1245 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1242 if (ret != QLA_SUCCESS) { 1246 if (ret != QLA_SUCCESS) {
1243 DEBUG9(qla_printk("Unable to program nvram address=%x " 1247 DEBUG9(qla_printk(KERN_WARNING, ha,
1244 "data=%x.\n", naddr, *dwptr)); 1248 "Unable to program nvram address=%x data=%x.\n",
1249 naddr, *dwptr));
1245 break; 1250 break;
1246 } 1251 }
1247 } 1252 }
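
Both qla_os.c and qla_sup.c above stop testing PCI_FUNC(ha->pdev->devfn) & 1 at every call site and instead derive a single ha->flags.port0 bit from the PCI interrupt pin register. A rough sketch of that derivation and one consumer, with placeholder structure and region names:

#include <linux/pci.h>

/* Placeholder hardware structure; only the fields used here. */
struct example_hw {
	struct pci_dev *pdev;
	u8 port_no;
	unsigned int port0:1;
	u32 flt_region_nvram;
};

static void example_set_port_flag(struct example_hw *ha)
{
	/* Interrupt pin A (1) maps to physical port 0 on these adapters. */
	pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
	ha->port0 = (ha->port_no & 1) ? 1 : 0;
}

static void example_pick_nvram_region(struct example_hw *ha,
				      u32 nvram0, u32 nvram1)
{
	/* Callers now test the cached flag instead of the PCI function. */
	ha->flt_region_nvram = ha->port0 ? nvram0 : nvram1;
}
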
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a34..b63feaf4312 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k1" 10#define QLA2XXX_VERSION "8.03.01-k3"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afb..2de5f3ad640 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1225 * @starget: SCSI target pointer 1225 * @starget: SCSI target pointer
1226 * @lun: SCSI Logical Unit Number 1226 * @lun: SCSI Logical Unit Number
1227 * 1227 *
1228 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1228 * Description: Looks up the scsi_device with the specified @lun for a given
1229 * for a given host. The returned scsi_device has an additional reference that 1229 * @starget. The returned scsi_device has an additional reference that
1230 * needs to be released with scsi_device_put once you're done with it. 1230 * needs to be released with scsi_device_put once you're done with it.
1231 **/ 1231 **/
1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486..41a21772df1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
887static sector_t get_sdebug_capacity(void) 887static sector_t get_sdebug_capacity(void)
888{ 888{
889 if (scsi_debug_virtual_gb > 0) 889 if (scsi_debug_virtual_gb > 0)
890 return 2048 * 1024 * scsi_debug_virtual_gb; 890 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
891 else 891 else
892 return sdebug_store_sectors; 892 return sdebug_store_sectors;
893} 893}
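
The scsi_debug.c change above widens the capacity multiply with a sector_t cast. Without it the expression is evaluated in plain int arithmetic and overflows once scsi_debug_virtual_gb reaches about 1024 (a 1 TB virtual disk). The sketch below restates the fixed form with a placeholder parameter name:

#include <linux/types.h>

static sector_t example_capacity(unsigned int virtual_gb)
{
	/* 2048 * 1024 sectors of 512 bytes per GB; casting one operand to
	 * sector_t makes the multiply use sector_t's width instead of int. */
	return 2048 * 1024 * (sector_t)virtual_gb;
}
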
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be197..a1689353d7f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
641/** 641/**
642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory 642 * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory
643 * @scmd: SCSI command structure to restore 643 * @scmd: SCSI command structure to restore
644 * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd 644 * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd
645 * 645 *
646 * Undo any damage done by above scsi_prep_eh_cmnd(). 646 * Undo any damage done by above scsi_eh_prep_cmnd().
647 */ 647 */
648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
649{ 649{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1451 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
1452 * 1452 *
1453 * Locking: 1453 * Locking:
1454 * We must be called from process context; scsi_allocate_request() 1454 * We must be called from process context.
1455 * may sleep.
1456 * 1455 *
1457 * Notes: 1456 * Notes:
1458 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1457 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1459 * head of the devices request queue, and continue. 1458 * head of the devices request queue, and continue.
1460 *
1461 * Bugs:
1462 * scsi_allocate_request() may sleep waiting for existing requests to
1463 * be processed. However, since we haven't kicked off any request
1464 * processing for this host, this may deadlock.
1465 *
1466 * If scsi_allocate_request() fails for what ever reason, we
1467 * completely forget to lock the door.
1468 */ 1459 */
1469static void scsi_eh_lock_door(struct scsi_device *sdev) 1460static void scsi_eh_lock_door(struct scsi_device *sdev)
1470{ 1461{
1471 struct request *req; 1462 struct request *req;
1472 1463
1464 /*
1465 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1466 * request becomes available
1467 */
1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1468 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1474 if (!req)
1475 return;
1476 1469
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1470 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0; 1471 req->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..30f3275e119 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retrys */ 549 /* kill remainder if no retrys */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands Must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1359 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1360 struct Scsi_Host *shost = sdev->host;
1390 1361
1391 blkdev_dequeue_request(req); 1362 blk_start_request(req);
1392 1363
1393 if (unlikely(cmd == NULL)) { 1364 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1365 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1451
1481 if (!sdev) { 1452 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1453 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1454 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1455 scsi_kill_request(req, q);
1485 return; 1456 return;
1486 } 1457 }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1472 * that the request is fully prepared even if we cannot
1502 * accept it. 1473 * accept it.
1503 */ 1474 */
1504 req = elv_next_request(q); 1475 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1476 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1477 break;
1507 1478
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1488 * Remove the request from the request list.
1518 */ 1489 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1490 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1491 blk_start_request(req);
1521 sdev->device_busy++; 1492 sdev->device_busy++;
1522 1493
1523 spin_unlock(q->queue_lock); 1494 spin_unlock(q->queue_lock);
@@ -2441,20 +2412,18 @@ int
2441scsi_internal_device_unblock(struct scsi_device *sdev) 2412scsi_internal_device_unblock(struct scsi_device *sdev)
2442{ 2413{
2443 struct request_queue *q = sdev->request_queue; 2414 struct request_queue *q = sdev->request_queue;
2444 int err;
2445 unsigned long flags; 2415 unsigned long flags;
2446 2416
2447 /* 2417 /*
2448 * Try to transition the scsi device to SDEV_RUNNING 2418 * Try to transition the scsi device to SDEV_RUNNING
2449 * and goose the device queue if successful. 2419 * and goose the device queue if successful.
2450 */ 2420 */
2451 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2421 if (sdev->sdev_state == SDEV_BLOCK)
2452 if (err) { 2422 sdev->sdev_state = SDEV_RUNNING;
2453 err = scsi_device_set_state(sdev, SDEV_CREATED); 2423 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2454 2424 sdev->sdev_state = SDEV_CREATED;
2455 if (err) 2425 else
2456 return err; 2426 return -EINVAL;
2457 }
2458 2427
2459 spin_lock_irqsave(q->queue_lock, flags); 2428 spin_lock_irqsave(q->queue_lock, flags);
2460 blk_start_queue(q); 2429 blk_start_queue(q);
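
The scsi_lib.c hunks above are part of a tree-wide switch to the block layer's accessor API: elv_next_request()/blkdev_dequeue_request() become blk_peek_request()/blk_start_request(), and req->data_len, req->nr_sectors and req->sector are read through blk_rq_bytes(), blk_rq_sectors() and blk_rq_pos(). A minimal request_fn-style sketch of the new calls, with placeholder names and purely illustrative completion handling:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Called with the queue lock held, as a real request_fn would be. */
static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		/* Dequeue the request and start its timeout handling. */
		blk_start_request(req);

		pr_info("req: pos=%llu sectors=%u bytes=%u\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_bytes(req));

		/* Illustrative only: complete the whole request with an
		 * error; __blk_end_request_all() expects the lock held. */
		__blk_end_request_all(req, -EIO);
	}
}
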
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f51ca485f3..c4478380140 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
119 119
120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(inq_timeout, 121MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 20. Some devices may need more; most need less.");
124 124
125/* This lock protects only this list */ 125/* This lock protects only this list */
126static DEFINE_SPINLOCK(async_scan_lock); 126static DEFINE_SPINLOCK(async_scan_lock);
@@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
425 INIT_LIST_HEAD(&starget->devices); 425 INIT_LIST_HEAD(&starget->devices);
426 starget->state = STARGET_CREATED; 426 starget->state = STARGET_CREATED;
427 starget->scsi_level = SCSI_2; 427 starget->scsi_level = SCSI_2;
428 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
428 retry: 429 retry:
429 spin_lock_irqsave(shost->host_lock, flags); 430 spin_lock_irqsave(shost->host_lock, flags);
430 431
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6..10303272ba4 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 09479545529..f3e664628d7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
37#define ISCSI_TRANSPORT_VERSION "2.0-870" 37#define ISCSI_TRANSPORT_VERSION "2.0-870"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid;
41 struct scsi_transport_template t; 40 struct scsi_transport_template t;
42 struct iscsi_transport *iscsi_transport; 41 struct iscsi_transport *iscsi_transport;
43 struct list_head list; 42 struct list_head list;
@@ -357,7 +356,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
357 err = 0; 356 err = 0;
358 break; 357 break;
359 case ISCSI_SESSION_FAILED: 358 case ISCSI_SESSION_FAILED:
360 err = DID_TRANSPORT_DISRUPTED << 16; 359 err = DID_IMM_RETRY << 16;
361 break; 360 break;
362 case ISCSI_SESSION_FREE: 361 case ISCSI_SESSION_FREE:
363 err = DID_TRANSPORT_FAILFAST << 16; 362 err = DID_TRANSPORT_FAILFAST << 16;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
938} 937}
939 938
940static int 939static int
941iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 940iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
942{ 941{
943 return netlink_broadcast(nls, skb, 0, 1, gfp); 942 return nlmsg_multicast(nls, skb, 0, group, gfp);
944}
945
946static int
947iscsi_unicast_skb(struct sk_buff *skb, int pid)
948{
949 int rc;
950
951 rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
952 if (rc < 0) {
953 printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
954 return rc;
955 }
956
957 return 0;
958} 943}
959 944
960int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 945int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
980 return -ENOMEM; 965 return -ENOMEM;
981 } 966 }
982 967
983 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 968 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
984 ev = NLMSG_DATA(nlh); 969 ev = NLMSG_DATA(nlh);
985 memset(ev, 0, sizeof(*ev)); 970 memset(ev, 0, sizeof(*ev));
986 ev->transport_handle = iscsi_handle(conn->transport); 971 ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
991 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 976 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
992 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 977 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
993 978
994 return iscsi_unicast_skb(skb, priv->daemon_pid); 979 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
995} 980}
996EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 981EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
997 982
983int iscsi_offload_mesg(struct Scsi_Host *shost,
984 struct iscsi_transport *transport, uint32_t type,
985 char *data, uint16_t data_size)
986{
987 struct nlmsghdr *nlh;
988 struct sk_buff *skb;
989 struct iscsi_uevent *ev;
990 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
991
992 skb = alloc_skb(len, GFP_NOIO);
993 if (!skb) {
994 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
995 return -ENOMEM;
996 }
997
998 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
999 ev = NLMSG_DATA(nlh);
1000 memset(ev, 0, sizeof(*ev));
1001 ev->type = type;
1002 ev->transport_handle = iscsi_handle(transport);
1003 switch (type) {
1004 case ISCSI_KEVENT_PATH_REQ:
1005 ev->r.req_path.host_no = shost->host_no;
1006 break;
1007 case ISCSI_KEVENT_IF_DOWN:
1008 ev->r.notify_if_down.host_no = shost->host_no;
1009 break;
1010 }
1011
1012 memcpy((char *)ev + sizeof(*ev), data, data_size);
1013
1014 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
1015}
1016EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1017
998void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) 1018void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
999{ 1019{
1000 struct nlmsghdr *nlh; 1020 struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1014 return; 1034 return;
1015 } 1035 }
1016 1036
1017 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1037 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1018 ev = NLMSG_DATA(nlh); 1038 ev = NLMSG_DATA(nlh);
1019 ev->transport_handle = iscsi_handle(conn->transport); 1039 ev->transport_handle = iscsi_handle(conn->transport);
1020 ev->type = ISCSI_KEVENT_CONN_ERROR; 1040 ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1022 ev->r.connerror.cid = conn->cid; 1042 ev->r.connerror.cid = conn->cid;
1023 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 1043 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
1024 1044
1025 iscsi_broadcast_skb(skb, GFP_ATOMIC); 1045 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1026 1046
1027 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1047 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1028 error); 1048 error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1030EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1050EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1031 1051
1032static int 1052static int
1033iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1053iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1034 void *payload, int size) 1054 void *payload, int size)
1035{ 1055{
1036 struct sk_buff *skb; 1056 struct sk_buff *skb;
1037 struct nlmsghdr *nlh; 1057 struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
1045 return -ENOMEM; 1065 return -ENOMEM;
1046 } 1066 }
1047 1067
1048 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 1068 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1049 nlh->nlmsg_flags = flags; 1069 nlh->nlmsg_flags = flags;
1050 memcpy(NLMSG_DATA(nlh), payload, size); 1070 memcpy(NLMSG_DATA(nlh), payload, size);
1051 return iscsi_unicast_skb(skb, pid); 1071 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1052} 1072}
1053 1073
1054static int 1074static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1085 return -ENOMEM; 1105 return -ENOMEM;
1086 } 1106 }
1087 1107
1088 nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, 1108 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1089 (len - sizeof(*nlhstat)), 0); 1109 (len - sizeof(*nlhstat)), 0);
1090 evstat = NLMSG_DATA(nlhstat); 1110 evstat = NLMSG_DATA(nlhstat);
1091 memset(evstat, 0, sizeof(*evstat)); 1111 memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1109 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1129 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1110 nlhstat->nlmsg_len = actual_size; 1130 nlhstat->nlmsg_len = actual_size;
1111 1131
1112 err = iscsi_unicast_skb(skbstat, priv->daemon_pid); 1132 err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
1133 GFP_ATOMIC);
1113 } while (err < 0 && err != -ECONNREFUSED); 1134 } while (err < 0 && err != -ECONNREFUSED);
1114 1135
1115 return err; 1136 return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1143 return -ENOMEM; 1164 return -ENOMEM;
1144 } 1165 }
1145 1166
1146 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1167 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1147 ev = NLMSG_DATA(nlh); 1168 ev = NLMSG_DATA(nlh);
1148 ev->transport_handle = iscsi_handle(session->transport); 1169 ev->transport_handle = iscsi_handle(session->transport);
1149 1170
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1172 * this will occur if the daemon is not up, so we just warn 1193 * this will occur if the daemon is not up, so we just warn
1173 * the user and when the daemon is restarted it will handle it 1194 * the user and when the daemon is restarted it will handle it
1174 */ 1195 */
1175 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1196 rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
1176 if (rc == -ESRCH) 1197 if (rc == -ESRCH)
1177 iscsi_cls_session_printk(KERN_ERR, session, 1198 iscsi_cls_session_printk(KERN_ERR, session,
1178 "Cannot notify userspace of session " 1199 "Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1268 return err; 1289 return err;
1269} 1290}
1270 1291
1292static int iscsi_if_ep_connect(struct iscsi_transport *transport,
1293 struct iscsi_uevent *ev, int msg_type)
1294{
1295 struct iscsi_endpoint *ep;
1296 struct sockaddr *dst_addr;
1297 struct Scsi_Host *shost = NULL;
1298 int non_blocking, err = 0;
1299
1300 if (!transport->ep_connect)
1301 return -EINVAL;
1302
1303 if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
1304 shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
1305 if (!shost) {
1306 printk(KERN_ERR "ep connect failed. Could not find "
1307 "host no %u\n",
1308 ev->u.ep_connect_through_host.host_no);
1309 return -ENODEV;
1310 }
1311 non_blocking = ev->u.ep_connect_through_host.non_blocking;
1312 } else
1313 non_blocking = ev->u.ep_connect.non_blocking;
1314
1315 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1316 ep = transport->ep_connect(shost, dst_addr, non_blocking);
1317 if (IS_ERR(ep)) {
1318 err = PTR_ERR(ep);
1319 goto release_host;
1320 }
1321
1322 ev->r.ep_connect_ret.handle = ep->id;
1323release_host:
1324 if (shost)
1325 scsi_host_put(shost);
1326 return err;
1327}
1328
1271static int 1329static int
1272iscsi_if_transport_ep(struct iscsi_transport *transport, 1330iscsi_if_transport_ep(struct iscsi_transport *transport,
1273 struct iscsi_uevent *ev, int msg_type) 1331 struct iscsi_uevent *ev, int msg_type)
1274{ 1332{
1275 struct iscsi_endpoint *ep; 1333 struct iscsi_endpoint *ep;
1276 struct sockaddr *dst_addr;
1277 int rc = 0; 1334 int rc = 0;
1278 1335
1279 switch (msg_type) { 1336 switch (msg_type) {
1337 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1280 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1338 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1281 if (!transport->ep_connect) 1339 rc = iscsi_if_ep_connect(transport, ev, msg_type);
1282 return -EINVAL;
1283
1284 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1285 ep = transport->ep_connect(dst_addr,
1286 ev->u.ep_connect.non_blocking);
1287 if (IS_ERR(ep))
1288 return PTR_ERR(ep);
1289
1290 ev->r.ep_connect_ret.handle = ep->id;
1291 break; 1340 break;
1292 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1341 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1293 if (!transport->ep_poll) 1342 if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1365} 1414}
1366 1415
1367static int 1416static int
1368iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1417iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1418{
1419 struct Scsi_Host *shost;
1420 struct iscsi_path *params;
1421 int err;
1422
1423 if (!transport->set_path)
1424 return -ENOSYS;
1425
1426 shost = scsi_host_lookup(ev->u.set_path.host_no);
1427 if (!shost) {
1428 printk(KERN_ERR "set path could not find host no %u\n",
1429 ev->u.set_path.host_no);
1430 return -ENODEV;
1431 }
1432
1433 params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
1434 err = transport->set_path(shost, params);
1435
1436 scsi_host_put(shost);
1437 return err;
1438}
1439
1440static int
1441iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1369{ 1442{
1370 int err = 0; 1443 int err = 0;
1371 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1444 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1375 struct iscsi_cls_conn *conn; 1448 struct iscsi_cls_conn *conn;
1376 struct iscsi_endpoint *ep = NULL; 1449 struct iscsi_endpoint *ep = NULL;
1377 1450
1451 if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1452 *group = ISCSI_NL_GRP_UIP;
1453 else
1454 *group = ISCSI_NL_GRP_ISCSID;
1455
1378 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1456 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1379 if (!priv) 1457 if (!priv)
1380 return -EINVAL; 1458 return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1383 if (!try_module_get(transport->owner)) 1461 if (!try_module_get(transport->owner))
1384 return -EINVAL; 1462 return -EINVAL;
1385 1463
1386 priv->daemon_pid = NETLINK_CREDS(skb)->pid;
1387
1388 switch (nlh->nlmsg_type) { 1464 switch (nlh->nlmsg_type) {
1389 case ISCSI_UEVENT_CREATE_SESSION: 1465 case ISCSI_UEVENT_CREATE_SESSION:
1390 err = iscsi_if_create_session(priv, ep, ev, 1466 err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1469 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1545 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1470 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1546 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1471 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1547 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1548 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1472 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1549 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1473 break; 1550 break;
1474 case ISCSI_UEVENT_TGT_DSCVR: 1551 case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1477 case ISCSI_UEVENT_SET_HOST_PARAM: 1554 case ISCSI_UEVENT_SET_HOST_PARAM:
1478 err = iscsi_set_host_param(transport, ev); 1555 err = iscsi_set_host_param(transport, ev);
1479 break; 1556 break;
1557 case ISCSI_UEVENT_PATH_UPDATE:
1558 err = iscsi_set_path(transport, ev);
1559 break;
1480 default: 1560 default:
1481 err = -ENOSYS; 1561 err = -ENOSYS;
1482 break; 1562 break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
1499 uint32_t rlen; 1579 uint32_t rlen;
1500 struct nlmsghdr *nlh; 1580 struct nlmsghdr *nlh;
1501 struct iscsi_uevent *ev; 1581 struct iscsi_uevent *ev;
1582 uint32_t group;
1502 1583
1503 nlh = nlmsg_hdr(skb); 1584 nlh = nlmsg_hdr(skb);
1504 if (nlh->nlmsg_len < sizeof(*nlh) || 1585 if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
1511 if (rlen > skb->len) 1592 if (rlen > skb->len)
1512 rlen = skb->len; 1593 rlen = skb->len;
1513 1594
1514 err = iscsi_if_recv_msg(skb, nlh); 1595 err = iscsi_if_recv_msg(skb, nlh, &group);
1515 if (err) { 1596 if (err) {
1516 ev->type = ISCSI_KEVENT_IF_ERROR; 1597 ev->type = ISCSI_KEVENT_IF_ERROR;
1517 ev->iferror = err; 1598 ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
1525 */ 1606 */
1526 if (ev->type == ISCSI_UEVENT_GET_STATS && !err) 1607 if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
1527 break; 1608 break;
1528 err = iscsi_if_send_reply( 1609 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
1529 NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
1530 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 1610 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
1531 } while (err < 0 && err != -ECONNREFUSED); 1611 } while (err < 0 && err != -ECONNREFUSED);
1532 skb_pull(skb, rlen); 1612 skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1774 if (!priv) 1854 if (!priv)
1775 return NULL; 1855 return NULL;
1776 INIT_LIST_HEAD(&priv->list); 1856 INIT_LIST_HEAD(&priv->list);
1777 priv->daemon_pid = -1;
1778 priv->iscsi_transport = tt; 1857 priv->iscsi_transport = tt;
1779 priv->t.user_scan = iscsi_user_scan; 1858 priv->t.user_scan = iscsi_user_scan;
1780 priv->t.create_work_queue = 1; 1859 priv->t.create_work_queue = 1;
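
The scsi_transport_iscsi.c changes above drop the stored daemon_pid and deliver netlink events to multicast groups rather than unicasting them to a remembered pid. A hedged sketch of that send path, with a placeholder event structure, socket pointer and group number:

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

/* Placeholder payload; real events carry an iscsi_uevent. */
struct example_event {
	u32 code;
};

static int example_send_event(struct sock *nls, unsigned int group, u32 code)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct example_event *ev;
	int len = NLMSG_SPACE(sizeof(*ev));

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* pid 0: the message is not addressed to one particular listener. */
	nlh = __nlmsg_put(skb, 0, 0, 0, len - sizeof(*nlh), 0);
	ev = NLMSG_DATA(nlh);
	ev->code = code;

	/* Every subscriber of the group receives a copy of the skb. */
	return nlmsg_multicast(nls, skb, 0, group, GFP_ATOMIC);
}
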
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2..d606452297c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b63..878b17a9af3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
@@ -1510,7 +1510,7 @@ got_data:
1510 */ 1510 */
1511 sector_size = 512; 1511 sector_size = 512;
1512 } 1512 }
1513 blk_queue_hardsect_size(sdp->request_queue, sector_size); 1513 blk_queue_logical_block_size(sdp->request_queue, sector_size);
1514 1514
1515 { 1515 {
1516 char cap_str_2[10], cap_str_10[10]; 1516 char cap_str_2[10], cap_str_10[10];
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1902 index = sdkp->index; 1902 index = sdkp->index;
1903 dev = &sdp->sdev_gendev; 1903 dev = &sdp->sdev_gendev;
1904 1904
1905 if (!sdp->request_queue->rq_timeout) {
1906 if (sdp->type != TYPE_MOD)
1907 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1908 else
1909 blk_queue_rq_timeout(sdp->request_queue,
1910 SD_MOD_TIMEOUT);
1911 }
1912
1913 device_initialize(&sdkp->dev);
1914 sdkp->dev.parent = &sdp->sdev_gendev;
1915 sdkp->dev.class = &sd_disk_class;
1916 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1917
1918 if (device_add(&sdkp->dev))
1919 goto out_free_index;
1920
1921 get_device(&sdp->sdev_gendev);
1922
1923 if (index < SD_MAX_DISKS) { 1905 if (index < SD_MAX_DISKS) {
1924 gd->major = sd_major((index & 0xf0) >> 4); 1906 gd->major = sd_major((index & 0xf0) >> 4);
1925 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1907 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1954 1936
1955 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1937 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1956 sdp->removable ? "removable " : ""); 1938 sdp->removable ? "removable " : "");
1957
1958 return;
1959
1960 out_free_index:
1961 ida_remove(&sd_index_ida, index);
1962} 1939}
1963 1940
1964/** 1941/**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
2026 sdkp->openers = 0; 2003 sdkp->openers = 0;
2027 sdkp->previous_state = 1; 2004 sdkp->previous_state = 1;
2028 2005
2006 if (!sdp->request_queue->rq_timeout) {
2007 if (sdp->type != TYPE_MOD)
2008 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2009 else
2010 blk_queue_rq_timeout(sdp->request_queue,
2011 SD_MOD_TIMEOUT);
2012 }
2013
2014 device_initialize(&sdkp->dev);
2015 sdkp->dev.parent = &sdp->sdev_gendev;
2016 sdkp->dev.class = &sd_disk_class;
2017 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
2018
2019 if (device_add(&sdkp->dev))
2020 goto out_free_index;
2021
2022 get_device(&sdp->sdev_gendev);
2023
2029 async_schedule(sd_probe_async, sdkp); 2024 async_schedule(sd_probe_async, sdkp);
2030 2025
2031 return 0; 2026 return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
2055 **/ 2050 **/
2056static int sd_remove(struct device *dev) 2051static int sd_remove(struct device *dev)
2057{ 2052{
2058 struct scsi_disk *sdkp = dev_get_drvdata(dev); 2053 struct scsi_disk *sdkp;
2059 2054
2055 async_synchronize_full();
2056 sdkp = dev_get_drvdata(dev);
2060 device_del(&sdkp->dev); 2057 device_del(&sdkp->dev);
2061 del_gendisk(sdkp->disk); 2058 del_gendisk(sdkp->disk);
2062 sd_shutdown(dev); 2059 sd_shutdown(dev);
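
The sd.c hunks above all follow one pattern: direct reads of rq->sector and rq->nr_sectors become calls to the block-layer accessors blk_rq_pos() and blk_rq_sectors(). A minimal sketch of that style follows; the helper request_fits_disk() is hypothetical and not part of the patch, it merely restates the capacity check from sd_prep_fn() with the new accessors.

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical helper, not in the patch: capacity check expressed with
 * the accessor functions instead of struct request fields.
 */
static bool request_fits_disk(struct request *rq, struct gendisk *disk)
{
	sector_t start = blk_rq_pos(rq);      /* first 512-byte sector */
	unsigned int nr = blk_rq_sectors(rq); /* length in 512-byte sectors */

	return start + nr <= get_capacity(disk);
}
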
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff49279..82f14a9482d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd4..8201387b4da 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
290 sdp->sgdebug = 0; 290 sdp->sgdebug = 0;
291 q = sdp->device->request_queue; 291 q = sdp->device->request_queue;
292 sdp->sg_tablesize = min(q->max_hw_segments, 292 sdp->sg_tablesize = min(queue_max_hw_segments(q),
293 q->max_phys_segments); 293 queue_max_phys_segments(q));
294 } 294 }
295 if ((sfp = sg_add_sfp(sdp, dev))) 295 if ((sfp = sg_add_sfp(sdp, dev)))
296 filp->private_data = sfp; 296 filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
909 if (val < 0) 909 if (val < 0)
910 return -EINVAL; 910 return -EINVAL;
911 val = min_t(int, val, 911 val = min_t(int, val,
912 sdp->device->request_queue->max_sectors * 512); 912 queue_max_sectors(sdp->device->request_queue) * 512);
913 if (val != sfp->reserve.bufflen) { 913 if (val != sfp->reserve.bufflen) {
914 if (sg_res_in_use(sfp) || sfp->mmap_called) 914 if (sg_res_in_use(sfp) || sfp->mmap_called)
915 return -EBUSY; 915 return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
919 return 0; 919 return 0;
920 case SG_GET_RESERVED_SIZE: 920 case SG_GET_RESERVED_SIZE:
921 val = min_t(int, sfp->reserve.bufflen, 921 val = min_t(int, sfp->reserve.bufflen,
922 sdp->device->request_queue->max_sectors * 512); 922 queue_max_sectors(sdp->device->request_queue) * 512);
923 return put_user(val, ip); 923 return put_user(val, ip);
924 case SG_SET_COMMAND_Q: 924 case SG_SET_COMMAND_Q:
925 result = get_user(val, ip); 925 result = get_user(val, ip);
@@ -1059,12 +1059,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
1059 return -ENODEV; 1059 return -ENODEV;
1060 return scsi_ioctl(sdp->device, cmd_in, p); 1060 return scsi_ioctl(sdp->device, cmd_in, p);
1061 case BLKSECTGET: 1061 case BLKSECTGET:
1062 return put_user(sdp->device->request_queue->max_sectors * 512, 1062 return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
1063 ip); 1063 ip);
1064 case BLKTRACESETUP: 1064 case BLKTRACESETUP:
1065 return blk_trace_setup(sdp->device->request_queue, 1065 return blk_trace_setup(sdp->device->request_queue,
1066 sdp->disk->disk_name, 1066 sdp->disk->disk_name,
1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1068 NULL,
1068 (char *)arg); 1069 (char *)arg);
1069 case BLKTRACESTART: 1070 case BLKTRACESTART:
1070 return blk_trace_startstop(sdp->device->request_queue, 1); 1071 return blk_trace_startstop(sdp->device->request_queue, 1);
@@ -1260,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1260 1261
1261 sense = rq->sense; 1262 sense = rq->sense;
1262 result = rq->errors; 1263 result = rq->errors;
1263 resid = rq->data_len; 1264 resid = rq->resid_len;
1264 1265
1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1266 sdp->disk->disk_name, srp->header.pack_id, result)); 1267 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1377,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1377 sdp->device = scsidp; 1378 sdp->device = scsidp;
1378 INIT_LIST_HEAD(&sdp->sfds); 1379 INIT_LIST_HEAD(&sdp->sfds);
1379 init_waitqueue_head(&sdp->o_excl_wait); 1380 init_waitqueue_head(&sdp->o_excl_wait);
1380 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1381 sdp->sg_tablesize = min(queue_max_hw_segments(q),
1382 queue_max_phys_segments(q));
1381 sdp->index = k; 1383 sdp->index = k;
1382 kref_init(&sdp->d_ref); 1384 kref_init(&sdp->d_ref);
1383 1385
@@ -2055,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2055 sg_big_buff = def_reserved_size; 2057 sg_big_buff = def_reserved_size;
2056 2058
2057 bufflen = min_t(int, sg_big_buff, 2059 bufflen = min_t(int, sg_big_buff,
2058 sdp->device->request_queue->max_sectors * 512); 2060 queue_max_sectors(sdp->device->request_queue) * 512);
2059 sg_build_reserve(sfp, bufflen); 2061 sg_build_reserve(sfp, bufflen);
2060 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2062 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2061 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2063 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
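
The sg.c hunks make the same kind of switch for queue limits: direct reads of max_sectors, max_hw_segments and max_phys_segments become queue_max_sectors(), queue_max_hw_segments() and queue_max_phys_segments(). A minimal sketch, assuming a live request_queue; both helper names are hypothetical and not part of the patch.

#include <linux/blkdev.h>

/* Hypothetical helpers, not in the patch: the two limits sg derives from
 * the queue, expressed with the accessor functions.
 */
static unsigned short sg_tablesize_for(struct request_queue *q)
{
	/* scatter-gather table size cannot exceed either segment limit */
	return min(queue_max_hw_segments(q), queue_max_phys_segments(q));
}

static int sg_reserve_cap(struct request_queue *q, int requested)
{
	/* cap the reserve buffer at max_sectors worth of 512-byte sectors */
	return min_t(int, requested, queue_max_sectors(q) * 512);
}
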
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad..cd350dfc121 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
726 } 727 }
727 728
728 queue = cd->device->request_queue; 729 queue = cd->device->request_queue;
729 blk_queue_hardsect_size(queue, sector_size); 730 blk_queue_logical_block_size(queue, sector_size);
730 731
731 return; 732 return;
732} 733}
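
Both sd.c and sr.c now publish the medium's block size through blk_queue_logical_block_size(), which replaces blk_queue_hardsect_size() in this series. A minimal sketch of the call; the wrapper name and the defensive 512-byte fallback are illustrative only (the exact fallback policy differs per driver), not part of the patch.

#include <linux/blkdev.h>

/* Hypothetical wrapper, not in the patch: report the device's logical
 * block size to the block layer, falling back to 512 bytes when the
 * reported value is not one of the sizes the drivers handle.
 */
static void announce_sector_size(struct request_queue *q, unsigned int size)
{
	if (size != 512 && size != 1024 && size != 2048 && size != 4096)
		size = 512;

	blk_queue_logical_block_size(q, size);
}
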
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f1..b33d04250bb 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2964 !(STp->use_pf & PF_TESTED)) { 2964 !(STp->use_pf & PF_TESTED)) {
2965 /* Try the other possible state of Page Format if not 2965 /* Try the other possible state of Page Format if not
2966 already tried */ 2966 already tried */
2967 STp->use_pf = !STp->use_pf | PF_TESTED; 2967 STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
2968 st_release_request(SRpnt); 2968 st_release_request(SRpnt);
2969 SRpnt = NULL; 2969 SRpnt = NULL;
2970 return st_int_ioctl(STp, cmd_in, arg); 2970 return st_int_ioctl(STp, cmd_in, arg);
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
3983 return -ENODEV; 3983 return -ENODEV;
3984 } 3984 }
3985 3985
3986 i = min(SDp->request_queue->max_hw_segments, 3986 i = min(queue_max_hw_segments(SDp->request_queue),
3987 SDp->request_queue->max_phys_segments); 3987 queue_max_phys_segments(SDp->request_queue));
3988 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
3989 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
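
st.c (like sg.c above) now takes the residual count from req->resid_len rather than the repurposed req->data_len. A minimal sketch of a completion callback reading the result and residual; the function name is hypothetical and not part of the patch.

#include <linux/kernel.h>
#include <linux/blkdev.h>

/* Hypothetical completion handler, not in the patch: pick up the midlayer
 * result and the residual (untransferred) byte count from the request.
 */
static void example_scsi_execute_end(struct request *req, int uptodate)
{
	int result = req->errors;            /* result code set by the midlayer */
	unsigned int resid = req->resid_len; /* bytes not transferred */

	printk(KERN_DEBUG "request done: result=0x%x resid=%u\n",
	       result, resid);
}
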
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec826..45374d66d26 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
737 struct sym_hcb *np = sym_get_hcb(sdev->host); 737 struct sym_hcb *np = sym_get_hcb(sdev->host);
738 struct sym_tcb *tp = &np->target[sdev->id]; 738 struct sym_tcb *tp = &np->target[sdev->id];
739 struct sym_lcb *lp; 739 struct sym_lcb *lp;
740 unsigned long flags;
741 int error;
740 742
741 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 743 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
742 return -ENXIO; 744 return -ENXIO;
743 745
744 tp->starget = sdev->sdev_target; 746 spin_lock_irqsave(np->s.host->host_lock, flags);
747
745 /* 748 /*
746 * Fail the device init if the device is flagged NOSCAN at BOOT in 749 * Fail the device init if the device is flagged NOSCAN at BOOT in
747 * the NVRAM. This may speed up boot and maintain coherency with 750 * the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
753 756
754 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 757 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
755 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 758 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
756 starget_printk(KERN_INFO, tp->starget, 759 starget_printk(KERN_INFO, sdev->sdev_target,
757 "Scan at boot disabled in NVRAM\n"); 760 "Scan at boot disabled in NVRAM\n");
758 return -ENXIO; 761 error = -ENXIO;
762 goto out;
759 } 763 }
760 764
761 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 765 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
762 if (sdev->lun != 0) 766 if (sdev->lun != 0) {
763 return -ENXIO; 767 error = -ENXIO;
764 starget_printk(KERN_INFO, tp->starget, 768 goto out;
769 }
770 starget_printk(KERN_INFO, sdev->sdev_target,
765 "Multiple LUNs disabled in NVRAM\n"); 771 "Multiple LUNs disabled in NVRAM\n");
766 } 772 }
767 773
768 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 774 lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
769 if (!lp) 775 if (!lp) {
770 return -ENOMEM; 776 error = -ENOMEM;
777 goto out;
778 }
779 if (tp->nlcb == 1)
780 tp->starget = sdev->sdev_target;
771 781
772 spi_min_period(tp->starget) = tp->usr_period; 782 spi_min_period(tp->starget) = tp->usr_period;
773 spi_max_width(tp->starget) = tp->usr_width; 783 spi_max_width(tp->starget) = tp->usr_width;
774 784
775 return 0; 785 error = 0;
786out:
787 spin_unlock_irqrestore(np->s.host->host_lock, flags);
788
789 return error;
776} 790}
777 791
778/* 792/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
819static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 833static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
820{ 834{
821 struct sym_hcb *np = sym_get_hcb(sdev->host); 835 struct sym_hcb *np = sym_get_hcb(sdev->host);
822 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 836 struct sym_tcb *tp = &np->target[sdev->id];
837 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
838 unsigned long flags;
839
840 spin_lock_irqsave(np->s.host->host_lock, flags);
841
842 if (lp->busy_itlq || lp->busy_itl) {
843 /*
844 * This really shouldn't happen, but we can't return an error
845 * so let's try to stop all on-going I/O.
846 */
847 starget_printk(KERN_WARNING, tp->starget,
848 "Removing busy LCB (%d)\n", sdev->lun);
849 sym_reset_scsi_bus(np, 1);
850 }
823 851
824 if (lp->itlq_tbl) 852 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
825 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 853 /*
826 kfree(lp->cb_tags); 854 * It was the last unit for this target.
827 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 855 */
856 tp->head.sval = 0;
857 tp->head.wval = np->rv_scntl3;
858 tp->head.uval = 0;
859 tp->tgoal.check_nego = 1;
860 tp->starget = NULL;
861 }
862
863 spin_unlock_irqrestore(np->s.host->host_lock, flags);
828} 864}
829 865
830/* 866/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
890 if (!((uc->target >> t) & 1)) 926 if (!((uc->target >> t) & 1))
891 continue; 927 continue;
892 tp = &np->target[t]; 928 tp = &np->target[t];
929 if (!tp->nlcb)
930 continue;
893 931
894 switch (uc->cmd) { 932 switch (uc->cmd) {
895 933
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed18..69ad4945c93 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
1896 tp->head.sval = 0; 1896 tp->head.sval = 0;
1897 tp->head.wval = np->rv_scntl3; 1897 tp->head.wval = np->rv_scntl3;
1898 tp->head.uval = 0; 1898 tp->head.uval = 0;
1899 if (tp->lun0p)
1900 tp->lun0p->to_clear = 0;
1901 if (tp->lunmp) {
1902 int ln;
1903
1904 for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
1905 if (tp->lunmp[ln])
1906 tp->lunmp[ln]->to_clear = 0;
1907 }
1899 } 1908 }
1900 1909
1901 /* 1910 /*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4988 */ 4997 */
4989 if (ln && !tp->lunmp) { 4998 if (ln && !tp->lunmp) {
4990 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), 4999 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
4991 GFP_KERNEL); 5000 GFP_ATOMIC);
4992 if (!tp->lunmp) 5001 if (!tp->lunmp)
4993 goto fail; 5002 goto fail;
4994 } 5003 }
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
5008 tp->lun0p = lp; 5017 tp->lun0p = lp;
5009 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 5018 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5010 } 5019 }
5020 tp->nlcb++;
5011 5021
5012 /* 5022 /*
5013 * Let the itl task point to error handling. 5023 * Let the itl task point to error handling.
@@ -5085,6 +5095,43 @@ fail:
5085} 5095}
5086 5096
5087/* 5097/*
                                                               5098 * Lun control block deallocation. Returns the number of valid remaining LCBs
5099 * for the target.
5100 */
5101int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
5102{
5103 struct sym_tcb *tp = &np->target[tn];
5104 struct sym_lcb *lp = sym_lp(tp, ln);
5105
5106 tp->nlcb--;
5107
5108 if (ln) {
5109 if (!tp->nlcb) {
5110 kfree(tp->lunmp);
5111 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5112 tp->lunmp = NULL;
5113 tp->luntbl = NULL;
5114 tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
5115 } else {
5116 tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
5117 tp->lunmp[ln] = NULL;
5118 }
5119 } else {
5120 tp->lun0p = NULL;
5121 tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
5122 }
5123
5124 if (lp->itlq_tbl) {
5125 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5126 kfree(lp->cb_tags);
5127 }
5128
5129 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5130
5131 return tp->nlcb;
5132}
5133
5134/*
5088 * Queue a SCSI IO to the controller. 5135 * Queue a SCSI IO to the controller.
5089 */ 5136 */
5090int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 5137int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6b..053e63c8682 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
401 * An array of bus addresses is used on reselection. 401 * An array of bus addresses is used on reselection.
402 */ 402 */
403 u32 *luntbl; /* LCBs bus address table */ 403 u32 *luntbl; /* LCBs bus address table */
404 int nlcb; /* Number of valid LCBs (including LUN #0) */
404 405
405 /* 406 /*
406 * LUN table used by the C code. 407 * LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
1065struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); 1066struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1066void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); 1067void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1067struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); 1068struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1069int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1068int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); 1070int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1069int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); 1071int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1070int sym_reset_scsi_target(struct sym_hcb *np, int target); 1072int sym_reset_scsi_target(struct sym_hcb *np, int target);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cb..54023d41fd1 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807